var/home/core/zuul-output/logs/kubelet.log:
Nov 28 15:19:19 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 15:19:19 crc restorecon[4763]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 15:19:19 crc restorecon[4763]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc 
restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 15:19:19 crc 
restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:19 crc restorecon[4763]: 
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:19 crc restorecon[4763]: 
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:19 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc 
restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 15:19:20 crc restorecon[4763]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 28 15:19:20 crc kubenswrapper[4884]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:19:20 crc kubenswrapper[4884]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 28 15:19:20 crc kubenswrapper[4884]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:19:20 crc kubenswrapper[4884]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
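The restorecon sweep that dominates this log reports "not reset as customized by admin" for nearly every pod file. That message means the file's current type (container_file_t) is listed in SELinux's customizable_types, so a plain restorecon leaves it alone rather than resetting it to the policy default. A minimal read-only shell sketch for verifying this on the node follows; the pod UID is copied from the log lines above, and any pod directory would do:

    # Pod UID taken from this log; substitute any directory under /var/lib/kubelet/pods.
    POD=/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803

    # Show the current label; per the log this pod's files carry
    # container_file_t with the MCS pair s0:c7,c13.
    ls -Zd "$POD/volumes/kubernetes.io~empty-dir/catalog-content"

    # container_file_t is a customizable type, which is why restorecon skips it
    # unless forced with -F. Preview only here (-n -v -R); forcing a relabel
    # under a running kubelet would strip the per-pod categories.
    grep container_file_t /etc/selinux/targeted/contexts/customizable_types
    restorecon -n -v -R "$POD"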
Nov 28 15:19:20 crc kubenswrapper[4884]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 28 15:19:20 crc kubenswrapper[4884]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.561791 4884 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565244 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565264 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565271 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565277 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565282 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565287 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565292 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565297 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565326 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565332 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565337 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565342 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565346 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565351 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565355 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565359 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565364 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565368 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565373 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565377 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
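All of the Flag deprecation warnings above point at the same fix: move the value into the file passed to --config (--pod-infra-container-image is the exception; per the log, the sandbox image now comes from the CRI side). A hedged sketch only, since on OpenShift this file is rendered by the machine-config operator and real changes belong in a KubeletConfig CR; the field names are from the upstream kubelet.config.k8s.io/v1beta1 API and the values are illustrative, not read from this node:

    # Example KubeletConfiguration equivalent of the deprecated flags above.
    cat <<'EOF' > /tmp/kubelet-conf-example.yaml
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    # replaces --container-runtime-endpoint
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
    # replaces --volume-plugin-dir
    volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec
    # replaces --register-with-taints
    registerWithTaints:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    # replaces --system-reserved
    systemReserved:
      cpu: 500m
      memory: 1Gi
    # the successor the warning suggests for --minimum-container-ttl-duration
    evictionHard:
      memory.available: 100Mi
    EOF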
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565381 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565387 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565391 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565396 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565400 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565405 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565409 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565414 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565418 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565423 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565427 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565432 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565436 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565443 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565448 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565470 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565475 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565481 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565486 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565490 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565495 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565499 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565503 4884 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565508 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565513 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565518 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565522 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565527 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565531 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565537 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565544 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565562 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565567 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565572 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565576 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565581 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565587 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565591 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565596 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565600 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565605 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565610 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565614 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565619 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565623 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565630 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565636 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565642 4884 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565647 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565652 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.565656 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565901 4884 flags.go:64] FLAG: --address="0.0.0.0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565914 4884 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565924 4884 flags.go:64] FLAG: --anonymous-auth="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565931 4884 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565938 4884 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565943 4884 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565960 4884 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565967 4884 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565973 4884 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565978 4884 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565984 4884 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565989 4884 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.565995 4884 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566000 4884 flags.go:64] FLAG: --cgroup-root=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566005 4884 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566009 4884 flags.go:64] FLAG: --client-ca-file=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566014 4884 flags.go:64] FLAG: --cloud-config=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566021 4884 flags.go:64] FLAG: --cloud-provider=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566027 4884 flags.go:64] FLAG: --cluster-dns="[]"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566033 4884 flags.go:64] FLAG: --cluster-domain=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566038 4884 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566043 4884 flags.go:64] FLAG: --config-dir=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566048 4884 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566064 4884 flags.go:64] FLAG: --container-log-max-files="5"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566071 4884 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566077 4884 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566082 4884 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566102 4884 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566107 4884 flags.go:64] FLAG: --contention-profiling="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566113 4884 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566125 4884 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566131 4884 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566135 4884 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566142 4884 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566147 4884 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566152 4884 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566157 4884 flags.go:64] FLAG: --enable-load-reader="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566162 4884 flags.go:64] FLAG: --enable-server="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566167 4884 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566173 4884 flags.go:64] FLAG: --event-burst="100"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566179 4884 flags.go:64] FLAG: --event-qps="50"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566184 4884 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566189 4884 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566194 4884 flags.go:64] FLAG: --eviction-hard=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566200 4884 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566207 4884 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566212 4884 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566218 4884 flags.go:64] FLAG: --eviction-soft=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566223 4884 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566228 4884 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566233 4884 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566238 4884 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566243 4884 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566248 4884 flags.go:64] FLAG: --fail-swap-on="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566253 4884 flags.go:64] FLAG: --feature-gates=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566260 4884 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566265 4884 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566270 4884 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566276 4884 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566281 4884 flags.go:64] FLAG: --healthz-port="10248"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566287 4884 flags.go:64] FLAG: --help="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566292 4884 flags.go:64] FLAG: --hostname-override=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566297 4884 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566302 4884 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566307 4884 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566312 4884 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566317 4884 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566322 4884 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566327 4884 flags.go:64] FLAG: --image-service-endpoint=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566332 4884 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566337 4884 flags.go:64] FLAG: --kube-api-burst="100"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566342 4884 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566348 4884 flags.go:64] FLAG: --kube-api-qps="50"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566353 4884 flags.go:64] FLAG: --kube-reserved=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566358 4884 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566363 4884 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566368 4884 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566373 4884 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566378 4884 flags.go:64] FLAG: --lock-file=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566383 4884 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566388 4884 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566395 4884 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566404 4884 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566409 4884 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566414 4884 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566419 4884 flags.go:64] FLAG: --logging-format="text"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566424 4884 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566430 4884 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566435 4884 flags.go:64] FLAG: --manifest-url=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566440 4884 flags.go:64] FLAG: --manifest-url-header=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566446 4884 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566452 4884 flags.go:64] FLAG: --max-open-files="1000000"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566459 4884 flags.go:64] FLAG: --max-pods="110"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566464 4884 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566469 4884 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566474 4884 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566479 4884 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566484 4884 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566489 4884 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566494 4884 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566506 4884 flags.go:64] FLAG: --node-status-max-images="50"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566511 4884 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566516 4884 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566521 4884 flags.go:64] FLAG: --pod-cidr=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566526 4884 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566534 4884 flags.go:64] FLAG: --pod-manifest-path=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566539 4884 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566544 4884 flags.go:64] FLAG: --pods-per-core="0"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566549 4884 flags.go:64] FLAG: --port="10250"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566554 4884 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566559 4884 flags.go:64] FLAG: --provider-id=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566564 4884 flags.go:64] FLAG: --qos-reserved=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566569 4884 flags.go:64] FLAG: --read-only-port="10255"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566574 4884 flags.go:64] FLAG: --register-node="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566580 4884 flags.go:64] FLAG: --register-schedulable="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566585 4884 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566594 4884 flags.go:64] FLAG: --registry-burst="10"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566600 4884 flags.go:64] FLAG: --registry-qps="5"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566605 4884 flags.go:64] FLAG: --reserved-cpus=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566610 4884 flags.go:64] FLAG: --reserved-memory=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566616 4884 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566621 4884 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566635 4884 flags.go:64] FLAG: --rotate-certificates="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566641 4884 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566646 4884 flags.go:64] FLAG: --runonce="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566652 4884 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566657 4884 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566662 4884 flags.go:64] FLAG: --seccomp-default="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566668 4884 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566673 4884 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566678 4884 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566683 4884 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566688 4884 flags.go:64] FLAG: --storage-driver-password="root"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566693 4884 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566697 4884 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566702 4884 flags.go:64] FLAG: --storage-driver-user="root"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566707 4884 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566712 4884 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566718 4884 flags.go:64] FLAG: --system-cgroups=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566723 4884 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566731 4884 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566736 4884 flags.go:64] FLAG: --tls-cert-file=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566741 4884 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566747 4884 flags.go:64] FLAG: --tls-min-version=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566752 4884 flags.go:64] FLAG: --tls-private-key-file=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566757 4884 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566762 4884 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566767 4884 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566772 4884 flags.go:64] FLAG: --v="2"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566780 4884 flags.go:64] FLAG: --version="false"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566786 4884 flags.go:64] FLAG: --vmodule=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566793 4884 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.566798 4884 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566941 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566949 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566954 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566960 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566964 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566969 4884 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566974 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566978 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566983 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566987 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566992 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.566997 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567003 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567008 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567013 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567017 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567022 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567026 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567031 4884 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567035 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567040 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567044 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567048 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567053 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567057 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567062 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567067 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567071 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567076 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567080 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567085 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567109 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567116 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567123 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567128 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567133 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567139 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567144 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567148 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567153 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567157 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567162 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567167 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567171 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567176 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567180 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567184 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567189 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567193 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567198 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567202 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567207 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567211 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567216 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567220 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567224 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567228 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567237 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567241 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567246 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567250 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567256 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567262 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567268 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567273 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567277 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567282 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567287 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567292 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567297 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.567301 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.567317 4884 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.576231 4884 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.576255 4884 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576741 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576761 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576769 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576777 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576784 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576790 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576805 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576811 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576818 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576823 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576828 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576834 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576839 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576844 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576850 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576857 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576865 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576872 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576877 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576887 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576892 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576899 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576905 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576910 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576917 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576922 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576929 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576938 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576944 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576949 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576953 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576962 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576967 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576972 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576976 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576981 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576986 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576990 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.576995 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577000 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577005 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577009 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577014 4884 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577022 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577027 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577032 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577037 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577041 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577046 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577051 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577056 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577060 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577065 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577070 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577075 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577079 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577103 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577109 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577114 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577119 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577124 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577129 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577133 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577139 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577144 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577149 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577153 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577158 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577163 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577171 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577176 4884 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.577185 4884 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577472 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577483 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577489 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577496 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577502 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577509 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577515 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577520 4884 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577526 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577537 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577542 4884 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577548 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577553 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577558 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577563 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577567 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577572 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577578 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577582 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577587 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577591 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577600 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577605 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577610 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577615 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577620 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577625 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577632 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577637 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577642 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577646 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577651 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577655 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577660 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577668 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577673 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577678 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577683 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577688 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577692 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577697 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577702 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577706 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577712 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577717 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577722 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577727 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577735 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577740 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577744 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577749 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577753 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577758 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577763 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577768 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577772 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577777 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577782 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577786 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577795 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577800 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577805 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577809 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577815 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577820 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577825 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577830 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577835 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577839 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577844 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.577849 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.577856 4884 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.579228 4884 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.581960 4884 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.582277 4884 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.582725 4884 server.go:997] "Starting client certificate rotation"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.582751 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.583268 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-09 03:47:22.038130668 +0000 UTC
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.583414 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 996h28m1.454724528s for next certificate rotation
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.587115 4884 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.588620 4884 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.595707 4884 log.go:25] "Validated CRI v1 runtime API"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.614137 4884 log.go:25] "Validated CRI v1 image API"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.615572 4884 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.618227 4884 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-15-14-36-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.618267 4884 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.631235 4884 manager.go:217] Machine: {Timestamp:2025-11-28 15:19:20.629917318 +0000 UTC m=+0.192701139 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4c40ba81-7d04-41d3-b14d-2c4a4505250b BootID:80fb0112-20e3-4d9a-9db2-f4ba712ce894 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:bf:56:6d Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:bf:56:6d Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:67:88:22 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:57:f7:78 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:24:3f:21 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:e1:e2:dc Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:77:bc:6f Speed:-1 Mtu:1496} {Name:eth10 MacAddress:6e:3b:76:c9:73:05 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:9e:16:38:e1:ea:ef Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.631451 4884 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.631600 4884 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.631888 4884 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632033 4884 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632064 4884 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632256 4884 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632265 4884 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632450 4884 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632486 4884 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.632755 4884 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.633132 4884 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.633766 4884 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.633784 4884 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.633866 4884 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.633884 4884 kubelet.go:324] "Adding apiserver pod source"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.633896 4884 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.639302 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.639415 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.639459 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.639537 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.639574 4884 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.639915 4884 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.640943 4884 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641477 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641506 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641515 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641524 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641537 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641546 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641555 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641569 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641579 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641588 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641600 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641608 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.641794 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.642301 4884 server.go:1280] "Started kubelet"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.642529 4884 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.643191 4884 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.643218 4884 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.643794 4884 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 15:19:20 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645032 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645058 4884 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.644761 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.189:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c34c1b7f90e3a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:19:20.642276922 +0000 UTC m=+0.205060723,LastTimestamp:2025-11-28 15:19:20.642276922 +0000 UTC m=+0.205060723,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645333 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 16:49:41.82345084 +0000 UTC
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645434 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1081h30m21.17802146s for next certificate rotation
Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.645439 4884 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645561 4884 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645597 4884 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.645676 4884 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.646026 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.646085 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.646443 4884 factory.go:55] Registering systemd factory
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.646469 4884 factory.go:221] Registration of the systemd container factory successfully
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.646701 4884 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.646896 4884 factory.go:153] Registering CRI-O factory
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.646917 4884 factory.go:221] Registration of the crio container factory successfully
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.646978 4884 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.647012 4884 factory.go:103] Registering Raw factory
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.647030 4884 manager.go:1196] Started watching for new ooms in manager
Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.646896 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="200ms"
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.647828 4884 manager.go:319] Starting recovery of all containers
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.660425 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.660665 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.660749 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.660829 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.660919 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.660993 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661070 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661168 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661246 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661336 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661416 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661496 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661571 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661693 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661777 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661865 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.661944 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662028 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662129 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662214 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662296 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662369 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662446 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662521 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662599 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662691 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662776 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662857 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.662935 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663011 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663107 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663191 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663269 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663343 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663424 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663492 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663574 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663649 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663747 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663821 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663895 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.663978 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664053 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664144 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664231 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664316 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664440 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664518 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664595 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664677 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664759 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664839 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.664933 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665012 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665109 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665196 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665298 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665382 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665463 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665536 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665605 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665674 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665753 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665827 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665904 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.665983 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666079 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666175 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666262 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666354 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666440 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666520 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666596 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666681 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666761 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666836 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666919 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666991 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667073 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667191 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667268 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667335 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667409 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667485 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667569 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667644 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667759 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667845 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.667922 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668000 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668130 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668215 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668284 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668349 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668431 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668509 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668588 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668666 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668742 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668808 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668890 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.668966 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669040 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669126 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669213 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669300 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669384 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669463 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669544 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669626 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669716 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669791 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669867 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.669948 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670036 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670306 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.666661 4884 manager.go:324] Recovery completed
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670394 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670600 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670628 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670640 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670651 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670662 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670684 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670693 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670703 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670714 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670724 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670733 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670742 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670751 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670764 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670780 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670796 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670811 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670824 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670834 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670845 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670859 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670868 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670878 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670889 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93"
volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670899 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670909 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670918 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670930 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670940 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670950 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670962 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670971 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670981 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.670992 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671003 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671013 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671023 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671033 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671042 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671052 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671061 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671071 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671081 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671105 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671114 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671124 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671133 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671144 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671153 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671162 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671172 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671180 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671191 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671200 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671209 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671217 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671225 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" 
volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671234 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671242 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671251 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671260 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671270 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671789 4884 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671820 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671832 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671843 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671854 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671864 4884 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671875 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671885 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671895 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671905 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671914 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671924 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671933 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671942 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671952 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671963 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671972 4884 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671980 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671990 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.671999 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672008 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672017 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672027 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672037 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672046 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672055 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672064 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672074 4884 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672099 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672107 4884 reconstruct.go:97] "Volume reconstruction finished" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.672114 4884 reconciler.go:26] "Reconciler: start to sync state" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.683464 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.685684 4884 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.686136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.686184 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.686198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.686987 4884 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.687034 4884 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.687066 4884 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.687138 4884 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 15:19:20 crc kubenswrapper[4884]: W1128 15:19:20.687653 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.687717 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.688237 4884 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.688264 4884 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.688287 4884 state_mem.go:36] "Initialized new in-memory state store" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.698080 4884 policy_none.go:49] "None policy: Start" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.700215 4884 memory_manager.go:170] "Starting 
memorymanager" policy="None" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.700246 4884 state_mem.go:35] "Initializing new in-memory state store" Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.746604 4884 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.765535 4884 manager.go:334] "Starting Device Plugin manager" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.765629 4884 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.765652 4884 server.go:79] "Starting device plugin registration server" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.766392 4884 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.766421 4884 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.766603 4884 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.766788 4884 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.766805 4884 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.773363 4884 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.787689 4884 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.787832 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.789573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.789667 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.789723 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.790151 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.790385 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.790442 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.791837 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.791868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.791882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.792157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.792187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.792198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.792320 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.792780 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.792819 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793481 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793604 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793693 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793711 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793738 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.793771 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794598 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794610 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794758 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794820 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.794959 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.795032 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.795436 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.795471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.795482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.795808 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.795857 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.796114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.796145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.796157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.796860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.796888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.796911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.848010 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="400ms" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.867244 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.868698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.868782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.868797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.868827 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 15:19:20 crc kubenswrapper[4884]: E1128 15:19:20.869674 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.189:6443: connect: connection refused" node="crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873682 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873727 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873754 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873810 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873866 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873894 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873941 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.873990 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.874029 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.874124 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.874177 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.874219 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.874263 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975731 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975834 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975866 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975897 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975925 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975976 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.975976 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976016 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976053 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976053 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976152 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976204 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976274 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976274 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976334 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976368 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976380 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976429 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976449 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976460 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976493 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976537 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976576 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976586 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976619 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976655 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 15:19:20 crc kubenswrapper[4884]: I1128 15:19:20.976829 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.070791 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.072478 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.072527 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.072545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.072576 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 15:19:21 crc kubenswrapper[4884]: E1128 15:19:21.073102 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.189:6443: connect: connection refused" node="crc" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.120288 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.132429 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.144255 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:19:21 crc kubenswrapper[4884]: W1128 15:19:21.165142 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-5de896760e92524fb1c12f0de90b826ac4011875416b0e3e3976e14880cf4801 WatchSource:0}: Error finding container 5de896760e92524fb1c12f0de90b826ac4011875416b0e3e3976e14880cf4801: Status 404 returned error can't find the container with id 5de896760e92524fb1c12f0de90b826ac4011875416b0e3e3976e14880cf4801
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.173431 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.180727 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:19:21 crc kubenswrapper[4884]: W1128 15:19:21.193557 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-02021607f10aa30c5084ef8494a4ece01cf7a3e67f2990e965d0462a0012ad81 WatchSource:0}: Error finding container 02021607f10aa30c5084ef8494a4ece01cf7a3e67f2990e965d0462a0012ad81: Status 404 returned error can't find the container with id 02021607f10aa30c5084ef8494a4ece01cf7a3e67f2990e965d0462a0012ad81
Nov 28 15:19:21 crc kubenswrapper[4884]: W1128 15:19:21.196877 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-aee0693406d1aab6d664748f8746bb55ad6e9dfda833fa6ad9c7242322337c9e WatchSource:0}: Error finding container aee0693406d1aab6d664748f8746bb55ad6e9dfda833fa6ad9c7242322337c9e: Status 404 returned error can't find the container with id aee0693406d1aab6d664748f8746bb55ad6e9dfda833fa6ad9c7242322337c9e
Nov 28 15:19:21 crc kubenswrapper[4884]: E1128 15:19:21.249388 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="800ms"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.473992 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.475715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.475768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.475778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.475803 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 15:19:21 crc kubenswrapper[4884]: E1128 15:19:21.476255 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.189:6443: connect: connection refused" node="crc"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.643655 4884 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:21 crc kubenswrapper[4884]: E1128 15:19:21.648370 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.189:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c34c1b7f90e3a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:19:20.642276922 +0000 UTC m=+0.205060723,LastTimestamp:2025-11-28 15:19:20.642276922 +0000 UTC m=+0.205060723,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.692731 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.692845 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bd0cd670a07b70c4e32e91437568bae0a76ff01bc0b2845cb2cb2957f27b9a65"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.694226 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e" exitCode=0
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.694308 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.694343 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"aee0693406d1aab6d664748f8746bb55ad6e9dfda833fa6ad9c7242322337c9e"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.694440 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:21 crc kubenswrapper[4884]: W1128 15:19:21.694707 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:21 crc kubenswrapper[4884]: E1128 15:19:21.695003 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697457 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697812 4884 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86" exitCode=0
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697866 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697884 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"02021607f10aa30c5084ef8494a4ece01cf7a3e67f2990e965d0462a0012ad81"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.697967 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.698677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.698707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.698716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.699367 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.699946 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.699971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.699979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.700485 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="034222a541bb814f61f69a88ef4436badaa1f0b78a689afae08d00083006231f" exitCode=0
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.700542 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"034222a541bb814f61f69a88ef4436badaa1f0b78a689afae08d00083006231f"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.700559 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"d85453690045439eff4fe963a274f32aa7689c946b1d67e91366f5c0416e5d3f"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.700608 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.701262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.701280 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.701287 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.706224 4884 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446" exitCode=0
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.706251 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.706268 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5de896760e92524fb1c12f0de90b826ac4011875416b0e3e3976e14880cf4801"}
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.706329 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.706966 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.706993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:21 crc kubenswrapper[4884]: I1128 15:19:21.707004 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:21 crc kubenswrapper[4884]: W1128 15:19:21.949518 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:21 crc kubenswrapper[4884]: E1128 15:19:21.949616 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:22 crc kubenswrapper[4884]: E1128 15:19:22.050048 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="1.6s"
Nov 28 15:19:22 crc kubenswrapper[4884]: W1128 15:19:22.149366 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:22 crc kubenswrapper[4884]: E1128 15:19:22.149770 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:22 crc kubenswrapper[4884]: W1128 15:19:22.152751 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.189:6443: connect: connection refused
Nov 28 15:19:22 crc kubenswrapper[4884]: E1128 15:19:22.152807 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.189:6443: connect: connection refused" logger="UnhandledError"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.276598 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.277909 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.277947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.277958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.277984 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.710418 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.710459 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.710470 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.710546 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.711262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.711285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.711292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.713756 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.713779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.713789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.713838 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.714432 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.714451 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.714459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.717676 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.717702 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.717713 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.717722 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.717731 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.717799 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.718330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.718353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.718363 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.719598 4884 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d" exitCode=0
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.719645 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.719725 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.720275 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.720293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.720300 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.721844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"93c012b0fce62892e81d8576b0db93e4fff7f483a0d997510d98f8a6229b7dbd"}
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.721912 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.722569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.722594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.722605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:22 crc kubenswrapper[4884]: I1128 15:19:22.929794 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.727367 4884 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4" exitCode=0
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.727468 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.727510 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.727949 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4"}
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.728042 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.728232 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.728530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.728558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.728569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.728949 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.729024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.729060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.729510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.729531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:23 crc kubenswrapper[4884]: I1128 15:19:23.729538 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:24 crc kubenswrapper[4884]: I1128 15:19:24.734533 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47"}
Nov 28 15:19:24 crc kubenswrapper[4884]: I1128 15:19:24.734575 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134"}
Nov 28 15:19:24 crc kubenswrapper[4884]: I1128 15:19:24.734588 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184"}
Nov 28 15:19:24 crc kubenswrapper[4884]: I1128 15:19:24.734598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b"}
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b"} Nov 28 15:19:25 crc kubenswrapper[4884]: I1128 15:19:25.744175 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53"} Nov 28 15:19:25 crc kubenswrapper[4884]: I1128 15:19:25.744266 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:25 crc kubenswrapper[4884]: I1128 15:19:25.745558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:25 crc kubenswrapper[4884]: I1128 15:19:25.745615 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:25 crc kubenswrapper[4884]: I1128 15:19:25.745627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.175846 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.175976 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.176011 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.177177 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.177208 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.177217 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.214680 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.214884 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.216733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.216785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.216804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.746592 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.747678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.747719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:26 crc 
kubenswrapper[4884]: I1128 15:19:26.747728 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.945944 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.946144 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.947517 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.947557 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.947570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:26 crc kubenswrapper[4884]: I1128 15:19:26.952605 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.187727 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.330321 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.330570 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.331979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.332045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.332061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.749613 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.749699 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.750649 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.750807 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751292 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751308 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751377 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 
15:19:27.751391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.751994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.752050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:27 crc kubenswrapper[4884]: I1128 15:19:27.752079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:28 crc kubenswrapper[4884]: I1128 15:19:28.528010 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:28 crc kubenswrapper[4884]: I1128 15:19:28.752943 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:28 crc kubenswrapper[4884]: I1128 15:19:28.753839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:28 crc kubenswrapper[4884]: I1128 15:19:28.753865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:28 crc kubenswrapper[4884]: I1128 15:19:28.753874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.567845 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.568143 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.569399 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.569438 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.569450 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.754943 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.756196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.756223 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:29 crc kubenswrapper[4884]: I1128 15:19:29.756232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 15:19:30 crc kubenswrapper[4884]: I1128 15:19:30.751750 4884 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 15:19:30 crc kubenswrapper[4884]: I1128 15:19:30.751874 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:19:30 crc kubenswrapper[4884]: E1128 15:19:30.773657 4884 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 15:19:32 crc kubenswrapper[4884]: E1128 15:19:32.278931 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 28 15:19:32 crc kubenswrapper[4884]: I1128 15:19:32.644147 4884 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.328333 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.328436 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.337135 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.337372 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.879777 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.881623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 
Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.881713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:33 crc kubenswrapper[4884]: I1128 15:19:33.881754 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.183022 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.183318 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.184918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.184977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.184993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.187153 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.775045 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.776116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.776194 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:36 crc kubenswrapper[4884]: I1128 15:19:36.776208 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.323279 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.324991 4884 trace.go:236] Trace[1360006889]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:19:24.627) (total time: 13697ms):
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[1360006889]: ---"Objects listed" error: 13697ms (15:19:38.324)
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[1360006889]: [13.697653987s] [13.697653987s] END
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.325028 4884 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.326445 4884 trace.go:236] Trace[311288097]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:19:24.211) (total time: 14115ms):
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[311288097]: ---"Objects listed" error: 14115ms (15:19:38.326)
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[311288097]: [14.115072966s] [14.115072966s] END
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.327005 4884 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.326539 4884 trace.go:236] Trace[2027640314]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:19:23.439) (total time: 14887ms):
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[2027640314]: ---"Objects listed" error: 14886ms (15:19:38.326)
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[2027640314]: [14.887011068s] [14.887011068s] END
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.327288 4884 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.328377 4884 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.329248 4884 trace.go:236] Trace[160893604]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 15:19:24.195) (total time: 14133ms):
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[160893604]: ---"Objects listed" error: 14133ms (15:19:38.329)
Nov 28 15:19:38 crc kubenswrapper[4884]: Trace[160893604]: [14.13368838s] [14.13368838s] END
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.329277 4884 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.385894 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56854->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.385898 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56848->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.386041 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56848->192.168.126.11:17697: read: connection reset by peer"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.385945 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:56854->192.168.126.11:17697: read: connection reset by peer"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.386670 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.386703 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.387039 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.387069 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.429622 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.434218 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.440565 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.645278 4884 apiserver.go:52] "Watching apiserver"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.651522 4884 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.651706 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"]
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.652082 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.652166 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.652294 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.652305 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.652731 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.652432 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.652421 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.652914 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.653624 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.660304 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.660577 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.660662 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.661217 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.662420 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.662989 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.663350 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.664344 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.665742 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.692479 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.704336 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.715489 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.723870 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.735984 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.746974 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.747212 4884 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.757117 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.766588 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.781923 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.783584 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc" exitCode=255 Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.783663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc"} Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.794953 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.804551 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.815815 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.831688 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.831994 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832141 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832239 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832348 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832458 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832566 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832671 4884 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832741 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832827 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832891 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.832880 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833300 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833321 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833344 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833364 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833389 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833426 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833453 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833476 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833500 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833523 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833546 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833998 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833661 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833843 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833855 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833927 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833952 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.833988 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834226 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834030 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834295 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834309 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834303 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834375 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834397 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834406 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834417 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834474 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834499 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834507 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834522 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834547 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834570 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834593 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834592 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834617 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834640 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834662 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834684 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834710 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834731 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834753 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834775 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834802 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834861 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834883 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834905 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834947 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834969 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.834990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835012 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835035 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" 
(UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835059 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835084 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835132 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835176 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835200 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835225 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835273 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835320 4884 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835386 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835413 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835394 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835439 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835534 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835597 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835641 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835675 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835711 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835745 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835777 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835805 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835838 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835866 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835892 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod 
\"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835917 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835942 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835969 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835993 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836018 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836046 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836070 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836121 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836152 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836173 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836201 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836225 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836282 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836306 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836330 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836354 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836381 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836420 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836446 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836470 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836496 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836522 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836548 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836578 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836627 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836671 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836694 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836719 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836751 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836780 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836803 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836826 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836854 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836883 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836909 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836934 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836961 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836986 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" 
(UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837012 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837036 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837061 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837129 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837160 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837186 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837213 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837239 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837292 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837350 4884 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837376 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837411 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837436 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837461 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837488 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837511 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837533 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837559 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837585 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837611 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837636 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837662 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837688 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837714 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837738 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837795 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837813 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837833 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:19:38 
crc kubenswrapper[4884]: I1128 15:19:38.837854 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837899 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837916 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837934 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837965 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838010 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838200 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838232 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: 
\"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838250 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838270 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838297 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838333 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838353 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838374 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838393 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838417 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838445 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: 
\"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838471 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838495 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838518 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838545 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838566 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838587 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838621 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838639 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838656 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838676 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838695 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838714 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838734 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838753 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838771 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838789 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838812 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838836 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838863 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod 
\"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838911 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838937 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838964 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838984 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839003 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839028 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835595 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839059 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835741 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835926 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.835967 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836055 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836077 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839170 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836234 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836252 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836346 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839206 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.836479 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837169 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837201 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837392 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837679 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.837731 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838018 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838038 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838179 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838347 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838364 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838387 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838446 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838663 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838677 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838682 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.838865 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839021 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839631 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839719 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839910 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839942 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.839236 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840047 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840160 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840173 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840229 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840281 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840494 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840498 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840557 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840595 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840639 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840672 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840707 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840786 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840823 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840848 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: 
\"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840878 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.840851 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841104 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841128 4884 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841145 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841163 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841169 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841178 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841279 4884 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841295 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841284 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841308 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841382 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841410 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841473 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841493 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841507 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841520 4884 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841530 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841539 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841572 4884 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841590 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841607 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841624 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841641 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841657 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841727 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841742 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841771 4884 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841786 4884 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841799 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841814 4884 
reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841810 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841832 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841848 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841854 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841862 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841896 4884 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841917 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.841934 4884 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842016 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842040 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842056 4884 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on 
node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842112 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842131 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842168 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.842299 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.842381 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:39.34236199 +0000 UTC m=+18.905145791 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.842446 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.842472 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:39.342465783 +0000 UTC m=+18.905249584 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842538 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842623 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842244 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842698 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842712 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842724 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842738 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842749 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842762 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842772 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842782 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842794 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842803 4884 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842813 4884 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842832 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842921 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.842955 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.843042 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.843427 4884 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.843621 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.845738 4884 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.846472 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.851178 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.851704 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.852016 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.852547 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.852699 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.854453 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.856116 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.856500 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.857048 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.857274 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.857542 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.857927 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.858337 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.858789 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.860202 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.861580 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.861977 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862003 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862016 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862103 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:39.362062273 +0000 UTC m=+18.924846284 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.862470 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862521 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862551 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862563 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.862626 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:39.362608335 +0000 UTC m=+18.925392356 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.863148 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.863431 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.863644 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.863752 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.863800 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.864133 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.864019 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.864063 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.864102 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.864377 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.865354 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.865424 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.866316 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.866743 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.866992 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.867500 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.867996 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.868150 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.868483 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.868696 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.868668 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.869289 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.869615 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.869782 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.869870 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.870988 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.871437 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.871422 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.871528 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.871756 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.871941 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.872084 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.872164 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:19:39.371027269 +0000 UTC m=+18.933811280 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.872175 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.872348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.872804 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.873053 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.873290 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.873904 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.875474 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.875490 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.876073 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.876499 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.876887 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.876948 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.877318 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.877341 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.877585 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). 
InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.877619 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.877622 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.877973 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.879355 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.879746 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.880938 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.882386 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.882474 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.882626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.886075 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.888380 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.889821 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.889860 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.889930 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.889985 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.890032 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.891797 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.892852 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.895190 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.895218 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.898413 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.899993 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.904573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.907558 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.909230 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.910404 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.913429 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.913567 4884 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.913664 4884 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.914991 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.915329 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.915544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.915568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.915576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.915591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.915600 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:38Z","lastTransitionTime":"2025-11-28T15:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.920948 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.926487 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.928384 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.928383 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.928702 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.936990 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.938266 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.938931 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.939595 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.939615 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.940876 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.940981 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.941536 4884 scope.go:117] "RemoveContainer" containerID="6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.942244 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.942538 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.944372 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.944894 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945327 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945475 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945493 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945506 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945516 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945527 4884 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945537 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945548 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945558 4884 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945566 4884 reconciler_common.go:293] "Volume 
detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945578 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945590 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945601 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945612 4884 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945626 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945635 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945644 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945652 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945664 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945674 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945683 4884 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945691 4884 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945703 4884 
reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945711 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945720 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945731 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945739 4884 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945748 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945758 4884 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945772 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945780 4884 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945789 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945822 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945833 4884 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945842 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945853 4884 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945864 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945874 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945885 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945894 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945907 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945918 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945930 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945953 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945973 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945986 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.945996 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946007 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946021 4884 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946030 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946039 4884 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946051 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946060 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946069 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946078 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946210 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946222 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946232 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946241 4884 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946346 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946357 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946366 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946383 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946392 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946401 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946414 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946423 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946433 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946440 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946444 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946508 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946526 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946550 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946577 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946593 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946609 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946628 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946643 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946657 4884 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946671 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946689 4884 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946703 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946716 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946729 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946748 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946763 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946776 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946789 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946808 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946822 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946836 4884 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946853 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946866 4884 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946878 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946890 4884 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946907 4884 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946921 4884 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946934 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946947 4884 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946965 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946978 4884 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.946994 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947011 4884 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947025 4884 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947038 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947052 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947070 4884 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947104 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947125 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947150 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947167 4884 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947181 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947194 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947208 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947230 4884 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947516 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.947634 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.949389 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.953562 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.953610 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: E1128 15:19:38.953903 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.961565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.961593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.961601 4884 
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.961620 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.961628 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:38Z","lastTransitionTime":"2025-11-28T15:19:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.962611 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.962996 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.964217 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.964388 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.964544 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.964769 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.966755 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.966879 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.966883 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.966970 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.966607 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.968155 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.968478 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.970623 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.970757 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.971455 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.971480 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.968485 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.972280 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.972381 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.973299 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.973354 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.973676 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.974116 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.980705 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.980848 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 15:19:38 crc kubenswrapper[4884]: I1128 15:19:38.981285 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.000159 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.007145 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.010802 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.010828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.010855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.010869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.010878 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.027273 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.031515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.031665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.031755 4884 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.031828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.031905 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049220 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049251 4884 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049261 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049270 4884 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049281 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049297 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049307 4884 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049327 4884 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049335 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049344 4884 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049352 4884 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049360 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049368 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049376 4884 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049385 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node 
\"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049394 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049402 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049410 4884 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049419 4884 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049427 4884 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049435 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049443 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049452 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049462 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049474 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049482 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049491 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049499 4884 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049508 4884 
reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049522 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049676 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049701 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.049725 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.071744 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.072008 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-4kfcr"] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.072412 4884 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078282 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078328 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078388 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078585 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.078762 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.079673 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.094998 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.106538 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.116371 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.128207 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.147301 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.157736 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.171268 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.180953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.180988 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.181001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.181021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.181031 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.184452 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.195865 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.253418 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4mjg\" (UniqueName: \"kubernetes.io/projected/552dad10-9ef9-4e53-ba05-00b44ae3a499-kube-api-access-w4mjg\") pod \"node-resolver-4kfcr\" (UID: \"552dad10-9ef9-4e53-ba05-00b44ae3a499\") " pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.253466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/552dad10-9ef9-4e53-ba05-00b44ae3a499-hosts-file\") pod \"node-resolver-4kfcr\" (UID: \"552dad10-9ef9-4e53-ba05-00b44ae3a499\") " pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.265855 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 15:19:39 crc kubenswrapper[4884]: W1128 15:19:39.278872 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-30b957b3ff457c2ae143f230279d3f914aae13ec418f2a97312dcf9ff620c1ca WatchSource:0}: Error finding container 30b957b3ff457c2ae143f230279d3f914aae13ec418f2a97312dcf9ff620c1ca: Status 404 returned error can't find the container with id 30b957b3ff457c2ae143f230279d3f914aae13ec418f2a97312dcf9ff620c1ca Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.282392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.282419 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.282429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.282442 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.282452 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.354410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.354467 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.354490 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/552dad10-9ef9-4e53-ba05-00b44ae3a499-hosts-file\") pod \"node-resolver-4kfcr\" (UID: \"552dad10-9ef9-4e53-ba05-00b44ae3a499\") " pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.354530 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4mjg\" (UniqueName: \"kubernetes.io/projected/552dad10-9ef9-4e53-ba05-00b44ae3a499-kube-api-access-w4mjg\") pod \"node-resolver-4kfcr\" (UID: \"552dad10-9ef9-4e53-ba05-00b44ae3a499\") " pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.354613 4884 configmap.go:193] Couldn't 
get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.354644 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/552dad10-9ef9-4e53-ba05-00b44ae3a499-hosts-file\") pod \"node-resolver-4kfcr\" (UID: \"552dad10-9ef9-4e53-ba05-00b44ae3a499\") " pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.354704 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:40.354683078 +0000 UTC m=+19.917466869 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.354697 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.354799 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:40.354776271 +0000 UTC m=+19.917560092 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.384592 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4mjg\" (UniqueName: \"kubernetes.io/projected/552dad10-9ef9-4e53-ba05-00b44ae3a499-kube-api-access-w4mjg\") pod \"node-resolver-4kfcr\" (UID: \"552dad10-9ef9-4e53-ba05-00b44ae3a499\") " pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.395293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.395339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.395350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.395367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.395379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.397226 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-4kfcr" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.445013 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-pwcbp"] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.445767 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.446345 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-zj27d"] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.446799 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: W1128 15:19:39.452816 4884 reflector.go:561] object-"openshift-machine-config-operator"/"proxy-tls": failed to list *v1.Secret: secrets "proxy-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-config-operator": no relationship found between node 'crc' and this object Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.452859 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-config-operator\"/\"proxy-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"proxy-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.454957 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455032 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455248 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455296 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455381 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455144 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:19:40.455117967 +0000 UTC m=+20.017901768 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455441 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455148 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455167 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455553 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455661 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.455677 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455680 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455781 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455833 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:40.455815633 +0000 UTC m=+20.018599504 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455662 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455912 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455922 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.455963 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:40.455954436 +0000 UTC m=+20.018738237 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.459324 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6wh6q"] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.460611 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-kk24c"] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.460803 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.461785 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.465741 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.465848 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.466109 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 15:19:39 crc kubenswrapper[4884]: W1128 15:19:39.466252 4884 reflector.go:561] object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": failed to list *v1.Secret: secrets "multus-ancillary-tools-dockercfg-vnmsz" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.466295 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-vnmsz\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"multus-ancillary-tools-dockercfg-vnmsz\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.466364 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.466578 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.466725 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 15:19:39 crc kubenswrapper[4884]: W1128 15:19:39.466928 4884 reflector.go:561] object-"openshift-multus"/"default-cni-sysctl-allowlist": failed to list *v1.ConfigMap: configmaps "default-cni-sysctl-allowlist" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 28 15:19:39 crc kubenswrapper[4884]: E1128 15:19:39.466954 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"default-cni-sysctl-allowlist\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.468004 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.468205 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.488579 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.500237 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.500283 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.500295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.500313 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.500336 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.505851 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.521344 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.543060 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556058 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-cni-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556318 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-socket-dir-parent\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556337 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120c26c6-4231-418f-a5af-738dc44915f8-proxy-tls\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cni-binary-copy\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-systemd\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556400 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frpmx\" (UniqueName: \"kubernetes.io/projected/d1b77432-5316-4dd6-a4a9-f74651377bdd-kube-api-access-frpmx\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 
28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556414 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-env-overrides\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556428 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-system-cni-dir\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556526 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-ovn-kubernetes\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556573 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-netns\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556589 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-kubelet\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556610 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-ovn\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556629 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-etc-kubernetes\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556645 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/120c26c6-4231-418f-a5af-738dc44915f8-rootfs\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556668 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-etc-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: 
\"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556684 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-cnibin\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556701 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-os-release\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-netns\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556757 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-k8s-cni-cncf-io\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556819 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-cni-multus\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556891 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/237d188f-b799-4a82-bc67-c3a8fac5771f-cni-binary-copy\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556914 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-hostroot\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556933 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-var-lib-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556953 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-config\") pod \"ovnkube-node-6wh6q\" (UID: 
\"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556975 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-kubelet\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.556997 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-slash\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557046 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557065 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-log-socket\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557101 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-netd\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557132 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8b7w\" (UniqueName: \"kubernetes.io/projected/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-kube-api-access-g8b7w\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557155 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-cni-bin\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-conf-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557201 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-node-log\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557222 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557246 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-systemd-units\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557267 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-daemon-config\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c95sm\" (UniqueName: \"kubernetes.io/projected/237d188f-b799-4a82-bc67-c3a8fac5771f-kube-api-access-c95sm\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557312 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvt9v\" (UniqueName: \"kubernetes.io/projected/120c26c6-4231-418f-a5af-738dc44915f8-kube-api-access-tvt9v\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557346 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-multus-certs\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120c26c6-4231-418f-a5af-738dc44915f8-mcd-auth-proxy-config\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 
15:19:39.557384 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-bin\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557402 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557421 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-script-lib\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557436 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cnibin\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557452 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-os-release\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557467 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-system-cni-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.557495 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovn-node-metrics-cert\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.558559 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.576000 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.595177 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.602847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.602886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.602896 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.602911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.602924 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.611577 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.618156 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.632667 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.636081 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.653793 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658178 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovn-node-metrics-cert\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658223 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-cni-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-socket-dir-parent\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658259 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120c26c6-4231-418f-a5af-738dc44915f8-proxy-tls\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658278 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cni-binary-copy\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-systemd\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frpmx\" (UniqueName: \"kubernetes.io/projected/d1b77432-5316-4dd6-a4a9-f74651377bdd-kube-api-access-frpmx\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-env-overrides\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658348 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-system-cni-dir\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658348 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-cni-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-ovn-kubernetes\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658397 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-socket-dir-parent\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658435 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-netns\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658406 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-netns\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658469 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-systemd\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658478 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-kubelet\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658496 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-system-cni-dir\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658496 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/120c26c6-4231-418f-a5af-738dc44915f8-rootfs\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658521 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/120c26c6-4231-418f-a5af-738dc44915f8-rootfs\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658526 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-ovn\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658545 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-kubelet\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658545 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-etc-kubernetes\") pod \"multus-zj27d\" (UID: 
\"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658568 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-etc-kubernetes\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658575 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-etc-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-cnibin\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658610 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-os-release\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-ovn\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-netns\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658658 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-cnibin\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658660 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-k8s-cni-cncf-io\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658682 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-etc-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" 
(UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-cni-multus\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-var-lib-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658725 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-os-release\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658728 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-config\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658752 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/237d188f-b799-4a82-bc67-c3a8fac5771f-cni-binary-copy\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658794 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-hostroot\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658812 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-kubelet\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658826 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-slash\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658854 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-cni-bin\") pod \"multus-zj27d\" (UID: 
\"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658868 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-conf-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658869 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-k8s-cni-cncf-io\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658903 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658883 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658924 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-netns\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658937 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-log-socket\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-kubelet\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658955 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-netd\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-ovn-kubernetes\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658970 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8b7w\" (UniqueName: \"kubernetes.io/projected/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-kube-api-access-g8b7w\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.658987 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-node-log\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659004 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659021 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvt9v\" (UniqueName: \"kubernetes.io/projected/120c26c6-4231-418f-a5af-738dc44915f8-kube-api-access-tvt9v\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-systemd-units\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-daemon-config\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c95sm\" (UniqueName: \"kubernetes.io/projected/237d188f-b799-4a82-bc67-c3a8fac5771f-kube-api-access-c95sm\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-bin\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659113 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-multus-certs\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659129 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120c26c6-4231-418f-a5af-738dc44915f8-mcd-auth-proxy-config\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659144 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cnibin\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659158 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-os-release\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659182 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659204 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-script-lib\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659219 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-system-cni-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659272 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-system-cni-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659279 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cni-binary-copy\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659296 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-slash\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659318 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-env-overrides\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659340 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-hostroot\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659321 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-cni-multus\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659368 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-systemd-units\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659380 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-var-lib-openvswitch\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659390 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-var-lib-cni-bin\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659409 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-config\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659415 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-node-log\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659515 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-tuning-conf-dir\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659545 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cnibin\") pod 
\"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659571 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659610 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-os-release\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659631 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-log-socket\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/237d188f-b799-4a82-bc67-c3a8fac5771f-cni-binary-copy\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-conf-dir\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659663 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-netd\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659705 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/237d188f-b799-4a82-bc67-c3a8fac5771f-host-run-multus-certs\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659754 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-bin\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.659961 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/120c26c6-4231-418f-a5af-738dc44915f8-mcd-auth-proxy-config\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.660052 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-script-lib\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.660200 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/237d188f-b799-4a82-bc67-c3a8fac5771f-multus-daemon-config\") pod \"multus-zj27d\" (UID: \"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.663057 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.688460 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.710431 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.712483 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.712520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.712534 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.712552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.712565 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.734958 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.749439 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovn-node-metrics-cert\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.750078 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8b7w\" (UniqueName: \"kubernetes.io/projected/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-kube-api-access-g8b7w\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.750437 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c95sm\" (UniqueName: \"kubernetes.io/projected/237d188f-b799-4a82-bc67-c3a8fac5771f-kube-api-access-c95sm\") pod \"multus-zj27d\" (UID: 
\"237d188f-b799-4a82-bc67-c3a8fac5771f\") " pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.750440 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvt9v\" (UniqueName: \"kubernetes.io/projected/120c26c6-4231-418f-a5af-738dc44915f8-kube-api-access-tvt9v\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.750793 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frpmx\" (UniqueName: \"kubernetes.io/projected/d1b77432-5316-4dd6-a4a9-f74651377bdd-kube-api-access-frpmx\") pod \"ovnkube-node-6wh6q\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") " pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.757255 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc 
kubenswrapper[4884]: I1128 15:19:39.780642 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-zj27d" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.786122 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.792006 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.800133 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-4kfcr" event={"ID":"552dad10-9ef9-4e53-ba05-00b44ae3a499","Type":"ContainerStarted","Data":"d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.800185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-4kfcr" event={"ID":"552dad10-9ef9-4e53-ba05-00b44ae3a499","Type":"ContainerStarted","Data":"964c6f65a19b68dd62c29b223ddc1449e563d88e86309f125037ce03245b4441"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.803522 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c09126c0970614d976b3c71589e7d6997bcfb17ce7375444ec5212123e2a3605"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.808980 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.812490 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.812535 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.815733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.815760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.815768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:39 crc 
kubenswrapper[4884]: I1128 15:19:39.815781 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.815792 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.816147 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.816187 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"30b957b3ff457c2ae143f230279d3f914aae13ec418f2a97312dcf9ff620c1ca"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.820330 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.820404 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.820415 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"056ce4620b103b43467657550b9492024d7ee1b3be727c1539a4c52e3692cc31"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.834139 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.866985 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.887830 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.911772 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07
b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.917939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.917970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.917979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.917994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.918003 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:39Z","lastTransitionTime":"2025-11-28T15:19:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.930172 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.944862 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.973334 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:39 crc kubenswrapper[4884]: I1128 15:19:39.992889 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:39Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.012692 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.020535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.020580 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.020593 4884 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.020611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.020623 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.036144 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runni
ng\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.055720 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.080978 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.100079 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.117034 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.122863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 
crc kubenswrapper[4884]: I1128 15:19:40.122907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.122918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.122937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.122952 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.134937 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.151876 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.170157 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.183893 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.205545 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.221802 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.225487 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.225730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.225808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.225889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.225974 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.240879 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: 
I1128 15:19:40.257580 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.276122 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready 
status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\
\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.328762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.328794 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.328804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.328820 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.328831 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.363384 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.363444 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.363561 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.363663 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:42.363640914 +0000 UTC m=+21.926424715 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.363575 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.363746 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:42.363727286 +0000 UTC m=+21.926511137 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.432501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.432547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.432558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.432576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.432591 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.437899 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.449007 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/120c26c6-4231-418f-a5af-738dc44915f8-proxy-tls\") pod \"machine-config-daemon-pwcbp\" (UID: \"120c26c6-4231-418f-a5af-738dc44915f8\") " pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.464664 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.464780 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.464801 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.464849 4884 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:19:42.46481004 +0000 UTC m=+22.027593841 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.464963 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.464980 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.464990 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.464987 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.465031 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.465042 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:42.465029425 +0000 UTC m=+22.027813226 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.465045 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.465137 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-28 15:19:42.465114937 +0000 UTC m=+22.027898738 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.488286 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.490849 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4a16f171-34b0-4f03-80dc-3b9bbd459bcc-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-kk24c\" (UID: \"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\") " pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.534578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.534630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.534642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.534659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.534672 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.580372 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.637530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.637577 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.637589 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.637609 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.637623 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.660205 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:19:40 crc kubenswrapper[4884]: W1128 15:19:40.670179 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod120c26c6_4231_418f_a5af_738dc44915f8.slice/crio-5c0822077a48f44a03122424e29292d8979580b23974b6e82c82770088b5b1e8 WatchSource:0}: Error finding container 5c0822077a48f44a03122424e29292d8979580b23974b6e82c82770088b5b1e8: Status 404 returned error can't find the container with id 5c0822077a48f44a03122424e29292d8979580b23974b6e82c82770088b5b1e8 Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.688858 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.688915 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.688964 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.689066 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.690143 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.700818 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.701511 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.702190 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-kk24c" Nov 28 15:19:40 crc kubenswrapper[4884]: E1128 15:19:40.689256 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.703046 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.703752 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.704900 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.705552 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.706282 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.707262 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.707992 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 15:19:40 crc 
kubenswrapper[4884]: I1128 15:19:40.710909 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.711499 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.713725 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.717904 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.719074 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.719840 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.721291 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.722312 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.724289 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.725260 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.725942 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.728577 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.729561 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.730882 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.731897 4884 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.732689 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.734311 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.737586 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.739193 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.740877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.740913 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.740921 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.740935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.740945 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.755956 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.756624 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.757429 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.759782 4884 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.759925 4884 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.762357 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.762960 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.763881 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.773509 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.774478 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.775672 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.776412 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.779067 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.779608 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.780392 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.781465 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.781920 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.782425 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.782959 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.784846 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.785425 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.786631 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.787175 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.788012 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.788541 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.789297 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.789905 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.791387 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.824251 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37
e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.831474 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb" exitCode=0 Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.831536 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.831562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"e53d52067fa7c483c8c2177d0d4789bd1b081f25258cdccaaf210fcc93698a8f"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.840461 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerStarted","Data":"732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.840499 4884 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerStarted","Data":"8b0f319213c34e33599598412b317599462cc09a700bc17fd0335ec012500fb1"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.850353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.850520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.850537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.850681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.850702 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.851723 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerStarted","Data":"e85fa5c5268357672929985bf34ae0a401e4592f1ef0ee55453d3f49566fc8c6"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.855441 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.855612 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"5c0822077a48f44a03122424e29292d8979580b23974b6e82c82770088b5b1e8"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.880442 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.915363 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.938302 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.955280 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.955336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.955348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.955367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.955377 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:40Z","lastTransitionTime":"2025-11-28T15:19:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.963115 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:40 crc kubenswrapper[4884]: I1128 15:19:40.995211 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.011239 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.033050 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.047983 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.058846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.058884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.058897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.058912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.058922 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.066771 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.081525 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.097344 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.122232 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.135565 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.139676 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-kbcrs"] Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.140104 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-kbcrs" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.145455 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.145526 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.147879 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.148110 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.152875 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.163199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.163229 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.163239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.163256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.163267 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.167212 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.192397 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.207504 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.223506 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.237386 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.251276 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.262563 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.265501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.265540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.265549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.265564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.265575 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.281304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9vdn\" (UniqueName: \"kubernetes.io/projected/9b10879b-1f31-4575-823c-8f39cd85978d-kube-api-access-r9vdn\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.281348 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b10879b-1f31-4575-823c-8f39cd85978d-host\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.281427 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/9b10879b-1f31-4575-823c-8f39cd85978d-serviceca\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.294249 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.337547 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.367894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.367941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.367955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.367972 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.367986 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.378606 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.382021 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9vdn\" (UniqueName: \"kubernetes.io/projected/9b10879b-1f31-4575-823c-8f39cd85978d-kube-api-access-r9vdn\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.382076 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b10879b-1f31-4575-823c-8f39cd85978d-host\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.382159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/9b10879b-1f31-4575-823c-8f39cd85978d-serviceca\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.382325 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/9b10879b-1f31-4575-823c-8f39cd85978d-host\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.383177 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/9b10879b-1f31-4575-823c-8f39cd85978d-serviceca\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.426698 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9vdn\" (UniqueName: \"kubernetes.io/projected/9b10879b-1f31-4575-823c-8f39cd85978d-kube-api-access-r9vdn\") pod \"node-ca-kbcrs\" (UID: \"9b10879b-1f31-4575-823c-8f39cd85978d\") " pod="openshift-image-registry/node-ca-kbcrs"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.441952 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.450025 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-kbcrs" Nov 28 15:19:41 crc kubenswrapper[4884]: W1128 15:19:41.464099 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b10879b_1f31_4575_823c_8f39cd85978d.slice/crio-032f0af800197b16b4c332dc332ab2b7eeddc0ebf34f7b993e1dc796e0e612f0 WatchSource:0}: Error finding container 032f0af800197b16b4c332dc332ab2b7eeddc0ebf34f7b993e1dc796e0e612f0: Status 404 returned error can't find the container with id 032f0af800197b16b4c332dc332ab2b7eeddc0ebf34f7b993e1dc796e0e612f0 Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.469761 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.469805 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.469818 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.469835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.469848 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.476321 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.517142 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.554332 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.573423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.573456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.573495 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.573512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.573520 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.604198 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.636787 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.676791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.677123 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.677211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.677298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.677364 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.685076 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c1061
6c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.716400 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.764873 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.783080 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.783148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.783161 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.783179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.783191 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.795315 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.835833 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers 
with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.861547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.866119 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.866171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.866185 4884 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.866198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.866210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.868220 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a16f171-34b0-4f03-80dc-3b9bbd459bcc" containerID="8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d" exitCode=0 Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.868455 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerDied","Data":"8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.872156 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.872185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.873978 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-kbcrs" event={"ID":"9b10879b-1f31-4575-823c-8f39cd85978d","Type":"ContainerStarted","Data":"032f0af800197b16b4c332dc332ab2b7eeddc0ebf34f7b993e1dc796e0e612f0"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.883512 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.885437 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.885482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.885495 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.885513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.885535 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.915397 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.959651 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.990751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.990788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.990800 4884 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.990820 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.990833 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:41Z","lastTransitionTime":"2025-11-28T15:19:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:41 crc kubenswrapper[4884]: I1128 15:19:41.995692 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.043885 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkub
e-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574
53265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.076705 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.093197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.093241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.093251 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.093266 4884 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.093279 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.123051 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.155473 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.195741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.195780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.195789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.195833 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.195850 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.197212 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.240206 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.276766 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.298966 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.299001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.299010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.299025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.299035 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.319267 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.354478 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.391081 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.391177 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.391197 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.391288 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:46.39126752 +0000 UTC m=+25.954051321 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.391382 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.391531 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:46.391490945 +0000 UTC m=+25.954274786 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.395923 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.401359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.401403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.401415 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.401432 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.401445 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.438952 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.475967 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.492057 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.492205 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492227 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:19:46.49219666 +0000 UTC m=+26.054980481 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.492280 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492463 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492530 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492551 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492493 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492621 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492637 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492635 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:46.49260957 +0000 UTC m=+26.055393391 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.492731 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:46.492710732 +0000 UTC m=+26.055494713 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.503899 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.503944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.503956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.503977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.503992 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.518977 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.556255 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.597377 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.606120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.606175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.606188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.606205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.606218 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.688059 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.688123 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.688073 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.688335 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.688474 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:42 crc kubenswrapper[4884]: E1128 15:19:42.688632 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.709605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.709655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.709668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.709689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.709701 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.812357 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.812405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.812417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.812434 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.812444 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.882152 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.884801 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a16f171-34b0-4f03-80dc-3b9bbd459bcc" containerID="14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a" exitCode=0 Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.884905 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerDied","Data":"14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.886574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-kbcrs" event={"ID":"9b10879b-1f31-4575-823c-8f39cd85978d","Type":"ContainerStarted","Data":"43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.901498 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.914578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.914629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.914639 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.914657 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.914668 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:42Z","lastTransitionTime":"2025-11-28T15:19:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.925625 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.937288 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.957778 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.975771 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:42 crc kubenswrapper[4884]: I1128 15:19:42.991309 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:42Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.003027 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.021142 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.021954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 
crc kubenswrapper[4884]: I1128 15:19:43.021980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.021989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.022003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.022012 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.033423 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.045842 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.063188 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.075399 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.122301 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.124508 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.124543 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.124555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.124579 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.124590 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.157197 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.194896 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.226400 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.226431 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.226442 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.226459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.226471 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.237600 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.274201 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.317010 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.329331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.329365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.329376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.329391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.329402 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.355470 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.396559 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.432153 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.432192 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.432204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.432222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.432234 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.434997 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.475934 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.515714 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.534392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.534425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.534438 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.534455 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.534466 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.555312 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.604452 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.635649 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.636874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.636929 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.636941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.636963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.636977 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.674926 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.715748 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.739371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.739421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.739430 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.739445 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.739456 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.764244 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c1061
6c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.796676 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.842482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.842539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.842552 4884 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.842572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.842587 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.892545 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a16f171-34b0-4f03-80dc-3b9bbd459bcc" containerID="d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74" exitCode=0 Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.892659 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerDied","Data":"d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.909155 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.929535 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.942231 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 
2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.945487 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.945536 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.946315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.946358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.946375 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:43Z","lastTransitionTime":"2025-11-28T15:19:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.960259 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:43 crc kubenswrapper[4884]: I1128 15:19:43.998734 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:43Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.038654 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.049562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.049603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.049616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.049636 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.049649 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.075409 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.116187 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.153032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.153277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.153373 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.153502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.153580 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.155909 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.194168 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.237388 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":
\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.256625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.256664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.256672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.256686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.256695 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.280122 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c1061
6c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.326677 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.359034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.359125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.359141 4884 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.359164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.359175 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.372427 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\"
:\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},
{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.393986 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.462471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.462520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.462539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.462564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.462577 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.566079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.566154 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.566170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.566193 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.566208 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.669341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.669391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.669404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.669422 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.669433 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.687759 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.687813 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.687759 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:44 crc kubenswrapper[4884]: E1128 15:19:44.687921 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:44 crc kubenswrapper[4884]: E1128 15:19:44.688022 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:44 crc kubenswrapper[4884]: E1128 15:19:44.688185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.772162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.772207 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.772218 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.772233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.772243 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.874877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.874914 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.874932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.874952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.874965 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.901614 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.905117 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a16f171-34b0-4f03-80dc-3b9bbd459bcc" containerID="f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c" exitCode=0 Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.905160 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerDied","Data":"f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.921997 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mo
untPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.946413 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.980020 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.981163 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.981212 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.981226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.981248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.981261 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:44Z","lastTransitionTime":"2025-11-28T15:19:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:44 crc kubenswrapper[4884]: I1128 15:19:44.993627 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:44Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.015917 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.028963 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.040215 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.053997 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.068864 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.084148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.084188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.084197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.084214 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.084224 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.094216 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:
19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.106754 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.124407 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.140746 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.154677 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.167209 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.187465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.187513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.187523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.187539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.187551 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.290237 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.290277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.290289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.290306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.290319 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.393602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.393647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.393659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.393678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.393694 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.496255 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.496324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.496345 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.496376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.496398 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.599357 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.599395 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.599408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.599425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.599438 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.702023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.702166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.702191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.702221 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.702241 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.804956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.805006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.805016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.805034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.805048 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.907810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.907880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.907907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.907939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.907962 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:45Z","lastTransitionTime":"2025-11-28T15:19:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.914300 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a16f171-34b0-4f03-80dc-3b9bbd459bcc" containerID="f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925" exitCode=0 Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.914362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerDied","Data":"f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925"} Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.934906 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.964322 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:45 crc kubenswrapper[4884]: I1128 15:19:45.982002 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.001941 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.010687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.010725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.010741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.010763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.010778 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.024780 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"sy
stem-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.060654 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.076739 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.092124 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f92
8b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.106430 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.113292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.113334 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.113347 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.113365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.113378 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.120985 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.133518 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.146820 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.157134 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.170135 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.184938 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:46Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.216530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.216565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.216579 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.216597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.216609 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.319799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.319859 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.319876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.319901 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.319921 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.423230 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.423684 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.423869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.424011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.424177 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.427474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.427556 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.427613 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.427681 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:54.427659076 +0000 UTC m=+33.990442877 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.427703 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.427776 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:54.427754878 +0000 UTC m=+33.990538719 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.527964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.529203 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.529233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.529259 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.529280 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.528295 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:19:54.528268449 +0000 UTC m=+34.091052290 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.528176 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.529510 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.529550 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.529764 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.529791 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.529809 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.529863 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:54.529847536 +0000 UTC m=+34.092631377 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.530506 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.530560 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.530585 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.530695 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:54.530662545 +0000 UTC m=+34.093446516 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.631836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.631869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.631880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.631894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.631902 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.687991 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.688153 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.688211 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.688002 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.688527 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:46 crc kubenswrapper[4884]: E1128 15:19:46.688442 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.735268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.735349 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.735371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.735400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.735421 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.838259 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.838327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.838350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.838379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.838398 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.941005 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.941052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.941065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.941085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:46 crc kubenswrapper[4884]: I1128 15:19:46.941176 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:46Z","lastTransitionTime":"2025-11-28T15:19:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.046123 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.046182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.046198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.046222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.046243 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.151388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.151428 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.151437 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.151453 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.151465 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.254443 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.254484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.254494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.254510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.254520 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.357403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.357443 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.357452 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.357472 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.357481 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.459860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.460118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.460128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.460144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.460153 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.563105 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.563147 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.563156 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.563172 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.563187 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.665433 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.665477 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.665486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.665500 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.665510 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.768672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.768722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.768733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.768757 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.768770 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.873032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.873121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.873136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.873162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.873181 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.936770 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a16f171-34b0-4f03-80dc-3b9bbd459bcc" containerID="9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f" exitCode=0
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.936844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerDied","Data":"9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f"}
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.945887 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801"}
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.946615 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q"
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.946672 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q"
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.946694 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q"
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.954222 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:47Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.967643 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:47Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.974231 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.975800 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.976636 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.976665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.976674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.976686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.976696 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:47Z","lastTransitionTime":"2025-11-28T15:19:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:19:47 crc kubenswrapper[4884]: I1128 15:19:47.985255 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:47Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.004144 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z 
is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.020378 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.044632 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.058576 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.071432 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.080063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.080120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.080133 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.080150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.080161 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.086144 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.102078 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.117224 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.133741 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.150451 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.166277 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.178915 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.183434 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.183472 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.183482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.183498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.183509 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.197221 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646f
b68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.213590 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.224178 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.236481 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.264134 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountP
ath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.276767 4884 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72c
c19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.287060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.287117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.287131 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.287151 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.287162 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.293963 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c
96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.305697 4884 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.320792 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.337152 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.356365 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.368876 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.387408 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.389748 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.389782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.389796 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.389819 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.389834 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.401054 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.415456 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.492231 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.492302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.492328 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.492364 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.492393 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.597553 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.597625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.597642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.597668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.597686 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.687666 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.687718 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.687666 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:48 crc kubenswrapper[4884]: E1128 15:19:48.687809 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:48 crc kubenswrapper[4884]: E1128 15:19:48.687897 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:48 crc kubenswrapper[4884]: E1128 15:19:48.687963 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.700909 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.700970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.700982 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.701002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.701039 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.803793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.803864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.803878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.803908 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.803924 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.907704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.907751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.907766 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.907785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.907798 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:48Z","lastTransitionTime":"2025-11-28T15:19:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.953854 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" event={"ID":"4a16f171-34b0-4f03-80dc-3b9bbd459bcc","Type":"ContainerStarted","Data":"a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c"} Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.969797 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:48 crc kubenswrapper[4884]: I1128 15:19:48.990800 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:48Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.005185 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.016967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.017022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.017034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.017054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.017072 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.019649 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.037940 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.061719 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf5
2e35a2c5b25d555d81caa801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.076913 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.095375 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.107886 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 
15:19:49.120135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.120199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.120213 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.120242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.120255 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.126598 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.140053 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.155693 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.175672 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.191897 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.207371 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.223166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.223225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.223242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.223268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.223287 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.326776 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.326825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.326841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.326863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.326881 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.430366 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.430436 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.430456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.430480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.430497 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.459931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.459980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.459998 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.460024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.460043 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: E1128 15:19:49.476601 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.482241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.482306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.482317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.482338 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.482350 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: E1128 15:19:49.499520 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.507788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.507839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.507855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.507876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.507891 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: E1128 15:19:49.523919 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.529641 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.529684 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.529697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.529721 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.529737 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: E1128 15:19:49.544213 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.548829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.548879 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.548893 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.548916 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.548931 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: E1128 15:19:49.565154 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: E1128 15:19:49.565321 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.567190 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
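[Editor's note: every attempt above fails at the same point. The kubelet's node-status PATCH is intercepted by the node.network-node-identity.openshift.io validating webhook at https://127.0.0.1:9743, whose serving certificate expired on 2025-08-24T17:21:41Z, roughly three months before the node's clock time of 2025-11-28T15:19:49Z, so every TLS handshake fails x509 verification and the API server rejects the patch. The minimal Go sketch below shows one way to confirm such an expiry against the endpoint named in the error; it is an editor's illustration, not part of this log or of any OpenShift tooling. The address is taken verbatim from the log, and InsecureSkipVerify is an assumption made so the handshake survives long enough to read the expired leaf certificate.

// certcheck.go — editor's sketch: dial the webhook endpoint named in the
// log and print its serving certificate's validity window.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Endpoint taken verbatim from the webhook error above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspection only: verification is exactly what fails here
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", leaf.Subject)
	fmt.Println("notBefore:", leaf.NotBefore.UTC().Format(time.RFC3339))
	fmt.Println("notAfter: ", leaf.NotAfter.UTC().Format(time.RFC3339))
	if now := time.Now().UTC(); now.After(leaf.NotAfter) {
		// Matches the log's "certificate has expired or is not yet valid".
		fmt.Println("expired", now.Sub(leaf.NotAfter).Round(time.Second), "ago")
	}
}

On a host in this state the sketch should report notAfter 2025-08-24T17:21:41Z; rotating the network-node-identity serving certificates (or running the cluster's usual certificate-recovery flow) would be the expected fix.]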
event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.567266 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.567284 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.567314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.567326 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.670769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.670835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.670850 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.670873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.670890 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.773527 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.773586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.773604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.773627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.773645 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.883882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.884350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.884366 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.884388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.884403 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.958690 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/0.log" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.962509 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801" exitCode=1 Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.962578 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.963795 4884 scope.go:117] "RemoveContainer" containerID="1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.979246 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.993307 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.993365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.993378 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.993400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.993416 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:49Z","lastTransitionTime":"2025-11-28T15:19:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:49 crc kubenswrapper[4884]: I1128 15:19:49.998382 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:49Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.015664 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.033783 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.046667 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.062413 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.078308 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.095849 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.097150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.097187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.097198 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.097215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.097228 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.110111 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.130925 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.145866 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.162485 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.179438 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.199459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.199509 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.199520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.199536 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.199546 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.200321 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf5
2e35a2c5b25d555d81caa801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.223330 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4b
a8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.301784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.301832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.301844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:50 crc 
kubenswrapper[4884]: I1128 15:19:50.301862 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.301875 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.404792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.404846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.404885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.404906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.404920 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.507862 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.507907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.507918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.507938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.507977 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.611058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.611410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.611484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.611562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.611637 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.688078 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.688140 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.688159 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:19:50 crc kubenswrapper[4884]: E1128 15:19:50.688310 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:19:50 crc kubenswrapper[4884]: E1128 15:19:50.688414 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:19:50 crc kubenswrapper[4884]: E1128 15:19:50.688517 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.706649 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.715016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.715116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.715130 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.715153 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.715168 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.722848 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.742870 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.756736 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.768985 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.786796 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.806922 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed 
*v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36a
fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.817887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.817949 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.817963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.817987 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.818002 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.822651 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.837229 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.858583 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.873545 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 
15:19:50.889309 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.906607 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.921172 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.921212 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.921225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.921244 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.921259 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:50Z","lastTransitionTime":"2025-11-28T15:19:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.924215 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.939978 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.968528 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/0.log" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.972561 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0"} Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.973200 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:19:50 crc kubenswrapper[4884]: I1128 15:19:50.988872 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.011034 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.024038 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.1
1\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.024799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.024839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.024851 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.024873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.024887 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.040043 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.056423 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.075022 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.091956 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.109172 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.128109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 
crc kubenswrapper[4884]: I1128 15:19:51.128160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.128171 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.128192 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.128205 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.129376 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.145156 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.159748 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 
15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.176980 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.206341 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\"
:\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 
6161 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.226328 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.231573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.231638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.231654 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.231686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.231703 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.257661 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.334467 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.334528 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.334546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.334573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.334592 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.437872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.438022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.438047 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.438072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.438128 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.541120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.541177 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.541193 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.541217 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.541232 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.645512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.645578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.645593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.645613 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.645628 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.748640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.749343 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.749459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.749601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.749707 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.852241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.852305 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.852323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.852348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.852366 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.955560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.955618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.955640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.955668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:51 crc kubenswrapper[4884]: I1128 15:19:51.955688 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:51Z","lastTransitionTime":"2025-11-28T15:19:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.058507 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.058552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.058560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.058577 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.058588 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.075430 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs"] Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.075901 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.078675 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.078790 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.092398 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.096617 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.096786 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.096913 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmxj6\" (UniqueName: 
\"kubernetes.io/projected/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-kube-api-access-nmxj6\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.097130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.114266 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\
\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.133416 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.150842 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.162258 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.162306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.162317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.162335 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.162346 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.173567 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.187171 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.198755 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.199117 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.199271 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.199392 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmxj6\" (UniqueName: \"kubernetes.io/projected/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-kube-api-access-nmxj6\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.199677 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.200452 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.205082 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.215945 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.219213 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmxj6\" (UniqueName: \"kubernetes.io/projected/8ac0e409-0044-4f7e-b4f0-565171a4ff9a-kube-api-access-nmxj6\") pod \"ovnkube-control-plane-749d76644c-glcqs\" (UID: \"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.234864 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.249683 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.265606 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.265645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.265574 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.265660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.265871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.265887 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.286614 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e
3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.304232 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.318056 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.333897 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.354413 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.368470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.368508 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.368539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.368559 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.368571 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.369409 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:52Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.396105 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" Nov 28 15:19:52 crc kubenswrapper[4884]: W1128 15:19:52.423000 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ac0e409_0044_4f7e_b4f0_565171a4ff9a.slice/crio-e5e41ad3c879aa42be5e3faecb38adfac1b79519cf7b081fa6ae579c377cc4e1 WatchSource:0}: Error finding container e5e41ad3c879aa42be5e3faecb38adfac1b79519cf7b081fa6ae579c377cc4e1: Status 404 returned error can't find the container with id e5e41ad3c879aa42be5e3faecb38adfac1b79519cf7b081fa6ae579c377cc4e1 Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.472454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.472516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.472530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.472554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.472570 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.575768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.575816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.575827 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.575844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.575855 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.679815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.680292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.680306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.680325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.680339 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.687945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:52 crc kubenswrapper[4884]: E1128 15:19:52.688078 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.688375 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:52 crc kubenswrapper[4884]: E1128 15:19:52.688538 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.688598 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:52 crc kubenswrapper[4884]: E1128 15:19:52.688641 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.783390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.783454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.783469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.783493 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.783505 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.886791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.886854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.886864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.886884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.886897 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.980637 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" event={"ID":"8ac0e409-0044-4f7e-b4f0-565171a4ff9a","Type":"ContainerStarted","Data":"e5e41ad3c879aa42be5e3faecb38adfac1b79519cf7b081fa6ae579c377cc4e1"} Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.988727 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.988791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.988816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.988852 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:52 crc kubenswrapper[4884]: I1128 15:19:52.988877 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:52Z","lastTransitionTime":"2025-11-28T15:19:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.091524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.091578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.091591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.091611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.091625 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.195314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.195451 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.195463 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.195492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.195512 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.299660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.299714 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.299726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.299747 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.299758 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.402387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.402840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.402934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.403051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.403422 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.508705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.508815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.508847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.508888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.508927 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.568294 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-5nbz9"] Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.568879 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:53 crc kubenswrapper[4884]: E1128 15:19:53.568960 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.599190 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d
07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.613254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.613304 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.613316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.613339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.613352 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.616475 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.616602 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvq78\" (UniqueName: \"kubernetes.io/projected/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-kube-api-access-fvq78\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.616954 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.632261 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.656505 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.681671 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.698408 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.711273 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.715524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.715566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.715577 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.715594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.715606 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.717743 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.717777 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvq78\" (UniqueName: \"kubernetes.io/projected/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-kube-api-access-fvq78\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:53 crc kubenswrapper[4884]: E1128 15:19:53.717857 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:53 crc kubenswrapper[4884]: E1128 15:19:53.717919 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:54.217901339 +0000 UTC m=+33.780685140 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.725703 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.744129 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.748874 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvq78\" (UniqueName: \"kubernetes.io/projected/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-kube-api-access-fvq78\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.756218 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.768300 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.780684 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.795999 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.810665 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.818529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.818582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.818593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.818612 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.818628 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.830276 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.848902 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.863319 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:53Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.921412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.921449 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.921459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.921474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.921485 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:53Z","lastTransitionTime":"2025-11-28T15:19:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.987427 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/1.log" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.988490 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/0.log" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.991842 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0" exitCode=1 Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.991892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0"} Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.991939 4884 scope.go:117] "RemoveContainer" containerID="1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801" Nov 28 15:19:53 crc kubenswrapper[4884]: I1128 15:19:53.993382 4884 scope.go:117] "RemoveContainer" containerID="83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0" Nov 28 15:19:53 crc kubenswrapper[4884]: E1128 15:19:53.993678 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.016348 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.024465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.024507 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.024518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.024533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.024543 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.038269 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.060144 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.075454 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.094010 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.114407 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.127791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.127861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.127878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.127902 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.127919 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.131606 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.149369 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.171653 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.210777 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.223480 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.223776 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.223896 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:55.223863391 +0000 UTC m=+34.786647312 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.231609 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.231679 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.231576 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.231703 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.231958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.231986 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.262003 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.282066 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.298244 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.319017 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.335138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.335200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.335223 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.335247 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.335267 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.343372 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.360808 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:54Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.438460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.438496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.438506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.438523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.438534 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.528084 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.528178 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.528258 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.528282 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.528316 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:10.52830295 +0000 UTC m=+50.091086751 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.528419 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:10.528390882 +0000 UTC m=+50.091174883 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.541039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.541076 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.541098 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.541117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.541128 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.628878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.629026 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629130 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:20:10.629070227 +0000 UTC m=+50.191854068 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629187 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629213 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.629212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629227 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629412 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:10.629396504 +0000 UTC m=+50.192180305 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629286 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629434 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629441 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.629459 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:10.629454175 +0000 UTC m=+50.192237976 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.643488 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.643731 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.643792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.643859 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.643952 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.688245 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.688406 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.688539 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.688576 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.688765 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:54 crc kubenswrapper[4884]: E1128 15:19:54.688972 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.745907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.745978 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.745992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.746012 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.746025 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.849015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.849060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.849073 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.849106 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.849121 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.951690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.951748 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.951758 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.951777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:54 crc kubenswrapper[4884]: I1128 15:19:54.951789 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:54Z","lastTransitionTime":"2025-11-28T15:19:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.000265 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" event={"ID":"8ac0e409-0044-4f7e-b4f0-565171a4ff9a","Type":"ContainerStarted","Data":"077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.000337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" event={"ID":"8ac0e409-0044-4f7e-b4f0-565171a4ff9a","Type":"ContainerStarted","Data":"33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.002639 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/1.log" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.021769 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.039847 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.051190 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 
15:19:55.054441 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.054489 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.054502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.054524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.054536 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.076671 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.097808 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.115741 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.128982 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.143680 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.157034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 
crc kubenswrapper[4884]: I1128 15:19:55.157144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.157173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.157205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.157228 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.164062 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.180004 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.193352 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.213770 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07
b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.227203 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.235928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:55 crc kubenswrapper[4884]: E1128 15:19:55.236108 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:55 crc kubenswrapper[4884]: E1128 15:19:55.236208 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:19:57.236187665 +0000 UTC m=+36.798971466 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.240521 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15
:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.251168 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\"
,\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.259993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.260041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.260056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.260077 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.260116 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.270280 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.282637 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:55Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.363008 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.363070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.363138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.363163 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.363185 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.466897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.466963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.466981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.467008 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.467029 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.570183 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.570254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.570277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.570303 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.570321 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.672683 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.673050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.673298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.673548 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.673723 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.687853 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:55 crc kubenswrapper[4884]: E1128 15:19:55.688078 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.776324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.776364 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.776374 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.776389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.776399 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.878510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.878561 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.878573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.878591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.878605 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.981585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.981680 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.981705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.981743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:55 crc kubenswrapper[4884]: I1128 15:19:55.981770 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:55Z","lastTransitionTime":"2025-11-28T15:19:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.084274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.084345 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.084362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.084389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.084406 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.187552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.187580 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.187591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.187606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.187614 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.290182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.290224 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.290235 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.290253 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.290269 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.393182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.393234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.393244 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.393260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.393271 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.495707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.495759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.495777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.495800 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.495817 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.599343 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.599401 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.599417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.599442 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.599459 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.688458 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.688557 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.688707 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:19:56 crc kubenswrapper[4884]: E1128 15:19:56.688875 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:56 crc kubenswrapper[4884]: E1128 15:19:56.688929 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:56 crc kubenswrapper[4884]: E1128 15:19:56.688998 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.703884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.703956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.703974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.703997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.704035 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.807504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.807562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.807579 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.807605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.807624 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.911413 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.911496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.911521 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.911555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:56 crc kubenswrapper[4884]: I1128 15:19:56.911578 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:56Z","lastTransitionTime":"2025-11-28T15:19:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.013485 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.013543 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.013556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.013572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.013583 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.116775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.116833 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.116848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.116867 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.116880 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.224888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.224927 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.224934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.224951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.224962 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.259822 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:19:57 crc kubenswrapper[4884]: E1128 15:19:57.260083 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:19:57 crc kubenswrapper[4884]: E1128 15:19:57.260281 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:01.260257517 +0000 UTC m=+40.823041328 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.328398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.328519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.328534 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.328561 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.328578 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.432677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.432741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.432759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.432790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.432809 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.536954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.537018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.537029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.537046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.537058 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.640592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.640702 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.640727 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.640760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.640783 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.687565 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:19:57 crc kubenswrapper[4884]: E1128 15:19:57.687744 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.743665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.743739 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.743762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.743795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.743822 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.756773 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.777723 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.807713 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.827380 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.846940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.847139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.847154 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.847172 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.847185 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.863126 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.883668 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.904128 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.919057 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.941385 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.949988 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.950050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.950068 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.950127 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.950149 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:57Z","lastTransitionTime":"2025-11-28T15:19:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:19:57 crc kubenswrapper[4884]: I1128 15:19:57.966948 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z"
Nov 28 15:19:57 crc kubenswrapper[4884]:
I1128 15:19:57.985244 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:57Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.008387 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.023653 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.036888 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.050071 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.053221 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.053299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.053319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.053346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.053365 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:58Z","lastTransitionTime":"2025-11-28T15:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.071735 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.087605 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.099174 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:58Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.156153 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.156206 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.156222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.156245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.156264 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:58Z","lastTransitionTime":"2025-11-28T15:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
[... five identical "Recording event message for node" (NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady) and "Node became not ready" stanzas, 15:19:58.259 through 15:19:58.672, elided as duplicates ...]
Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.688330 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.688409 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.688462 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:19:58 crc kubenswrapper[4884]: E1128 15:19:58.688693 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:19:58 crc kubenswrapper[4884]: E1128 15:19:58.688786 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:19:58 crc kubenswrapper[4884]: E1128 15:19:58.688900 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
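The six entries above all trace back to one condition: no CNI configuration was found in /etc/kubernetes/cni/net.d/, so no pod sandbox can be created and every pod sync is skipped. The following standalone Go sketch mirrors that directory test. It is not kubelet or CRI-O source; the confDir path is quoted from the log message, and the accepted extensions (.conf, .conflist, .json) are assumed from common CNI conventions.

    package main

    import (
        "fmt"
        "log"
        "os"
        "path/filepath"
    )

    func main() {
        // Path quoted in the kubelet errors above; many other distributions
        // use /etc/cni/net.d instead.
        const confDir = "/etc/kubernetes/cni/net.d"

        entries, err := os.ReadDir(confDir)
        if err != nil {
            log.Fatalf("read %s: %v", confDir, err)
        }

        found := 0
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            // Extensions assumed from common CNI conventions.
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                fmt.Println("CNI config:", filepath.Join(confDir, e.Name()))
                found++
            }
        }
        if found == 0 {
            fmt.Println("no CNI configuration files found; the node will stay NotReady")
        }
    }

Note that the NetworkReady condition in these messages is reported by the container runtime (CRI-O here) through the CRI and only relayed by the kubelet; the sketch merely reproduces the directory check, and an empty result is consistent with the NodeNotReady conditions recorded throughout this window.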
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:19:58 crc kubenswrapper[4884]: E1128 15:19:58.688786 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:19:58 crc kubenswrapper[4884]: E1128 15:19:58.688900 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.775725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.775782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.775803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.775832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.775853 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:58Z","lastTransitionTime":"2025-11-28T15:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.878489 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.878945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.879129 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.879223 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.879354 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:58Z","lastTransitionTime":"2025-11-28T15:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.982107 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.982145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.982155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.982170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:58 crc kubenswrapper[4884]: I1128 15:19:58.982179 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:58Z","lastTransitionTime":"2025-11-28T15:19:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.085347 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.085405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.085425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.085449 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.085464 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.188941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.188997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.189009 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.189032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.189046 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.291025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.291120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.291131 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.291148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.291159 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.393891 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.393967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.393984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.394009 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.394027 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.496699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.496759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.496776 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.496803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.496821 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
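Independently of the missing CNI configuration, every pod-status patch in this excerpt fails because the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z (the "x509: certificate has expired" errors above). The Go sketch below is one way to confirm that from the node; the address is taken from the log, everything else is illustrative, and InsecureSkipVerify is deliberate so the handshake gets far enough to read the expired certificate.

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        // Endpoint quoted in the webhook errors above.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            // Deliberate: chain verification would fail on the expired
            // certificate, and we only want to inspect its validity window.
            InsecureSkipVerify: true,
        })
        if err != nil {
            log.Fatalf("dial webhook endpoint: %v", err)
        }
        defer conn.Close()

        now := time.Now()
        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%v\n  notBefore=%s\n  notAfter=%s\n  expired=%v\n",
                cert.Subject,
                cert.NotBefore.Format(time.RFC3339),
                cert.NotAfter.Format(time.RFC3339),
                now.After(cert.NotAfter))
        }
    }

Against the state captured in this log, the sketch should print expired=true with notAfter=2025-08-24T17:21:41Z, matching the validity bound in the errors; until that certificate is rotated, status patches that pass through the webhook will keep failing regardless of CNI state.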
Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.687720 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:19:59 crc kubenswrapper[4884]: E1128 15:19:59.687866 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
[... three identical "Recording event message for node" / "Node became not ready" stanzas, 15:19:59.702 through 15:19:59.908, elided as duplicates ...]
Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.924967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.925346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.925359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.925373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.925382 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: E1128 15:19:59.941412 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.945240 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.945291 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.945305 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.945321 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.945333 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: E1128 15:19:59.959885 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.964877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.964963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
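[editor's note] Every status-update failure in this capture has the same root cause: the kubelet cannot POST to the node.network-node-identity.openshift.io webhook on 127.0.0.1:9743 because that endpoint serves a certificate that expired on 2025-08-24T17:21:41Z, months before the node's clock time of 2025-11-28. A minimal Go sketch for confirming the expiry from the node follows; this is a hypothetical diagnostic helper, not part of the log, and it assumes the webhook port is reachable locally.

// checkcert.go: connect to the webhook endpoint the kubelet is failing
// against and print the serving certificate's validity window.
// Hypothetical sketch; InsecureSkipVerify is set only so the handshake
// completes far enough to inspect the already-expired certificate.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // inspect the cert, don't trust it
	})
	if err != nil {
		log.Fatalf("handshake failed: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:    %s\n", cert.Subject)
	fmt.Printf("not before: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("not after:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("expired: matches the x509 error in the log above")
	}
}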
event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.964989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.965024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.965063 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:19:59 crc kubenswrapper[4884]: E1128 15:19:59.990512 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:19:59Z is after 2025-08-24T17:21:41Z" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.996875 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.996914 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
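[editor's note] The burst of "Error updating node status, will retry" entries running from 15:19:59.941412 through 15:20:00.032784 below is one pass through the kubelet's bounded node-status retry loop (upstream kubelet_node_status.go caps this at a small fixed count, nodeStatusUpdateRetry, five attempts in recent releases, before giving up until the next sync period). A short, hypothetical helper that tallies these errors per second from a log on stdin makes such bursts easy to spot when each payload runs to several kilobytes:

// tally.go: hypothetical helper; scans a kubelet log on stdin and counts
// node-status update errors per second.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// e.g. "E1128 15:19:59.941412 4884 kubelet_node_status.go" -> key "15:19:59"
	re := regexp.MustCompile(`E\d{4} (\d{2}:\d{2}:\d{2})\.\d+ \d+ kubelet_node_status\.go`)
	counts := map[string]int{}
	var order []string

	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 64*1024), 16*1024*1024) // patch payload lines are huge
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			if counts[m[1]] == 0 {
				order = append(order, m[1])
			}
			counts[m[1]]++
		}
	}
	for _, t := range order {
		fmt.Printf("%s  %d node-status update errors\n", t, counts[t])
	}
}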
event="NodeHasNoDiskPressure" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.996931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.996948 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:19:59 crc kubenswrapper[4884]: I1128 15:19:59.996958 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:19:59Z","lastTransitionTime":"2025-11-28T15:19:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:00 crc kubenswrapper[4884]: E1128 15:20:00.010468 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.015652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.015693 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.015704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.015726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.015738 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:00 crc kubenswrapper[4884]: E1128 15:20:00.032784 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z"
Nov 28 15:20:00 crc kubenswrapper[4884]: E1128 15:20:00.032944 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.034426 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.034477 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.034494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.034520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.034538 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.137628 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.137712 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.137728 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.137754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.137768 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.240652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.240692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.240703 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.240719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.240729 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.343293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.343341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.343353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.343371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.343383 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.447243 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.447316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.447341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.447371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.447395 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.549977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.550018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.550032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.550050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.550062 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.652928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.652986 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.653004 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.653025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.653043 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.687832 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.687892 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:00 crc kubenswrapper[4884]: E1128 15:20:00.688054 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.688121 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:00 crc kubenswrapper[4884]: E1128 15:20:00.688238 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:20:00 crc kubenswrapper[4884]: E1128 15:20:00.688423 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.701626 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z"
Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.713527 4884 status_manager.go:875] "Failed to update status for pod"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.722483 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.732259 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.744060 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.755366 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.755416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.755441 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.755471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.755494 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.767303 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e
3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1006f122deaf056d5c414d85f097ac7c90324cf52e35a2c5b25d555d81caa801\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:49Z\\\",\\\"message\\\":\\\"tor *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1128 15:19:49.590933 6161 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591803 6161 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 15:19:49.591832 6161 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 15:19:49.591835 6161 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 15:19:49.591950 6161 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 15:19:49.591973 6161 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 15:19:49.591979 6161 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:49.592006 6161 factory.go:656] Stopping watch factory\\\\nI1128 15:19:49.592032 6161 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:49.592065 6161 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 15:19:49.592080 6161 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 15:19:49.592104 6161 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 15:19:49.592113 6161 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 
6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.793677 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.824769 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07
b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.841716 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.858917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.858963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.858973 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.859000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.859013 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.859048 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.872889 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.888684 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.899516 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.917669 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.932108 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.946139 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.959116 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:20:00Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.961489 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.961527 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.961539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.961559 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:00 crc kubenswrapper[4884]: I1128 15:20:00.961571 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:00Z","lastTransitionTime":"2025-11-28T15:20:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.064038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.064135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.064149 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.064174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.064190 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.167371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.167414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.167423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.167438 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.167450 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.270296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.270371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.270381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.270422 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.270437 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.307167 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:01 crc kubenswrapper[4884]: E1128 15:20:01.307385 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:01 crc kubenswrapper[4884]: E1128 15:20:01.307466 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:09.307447642 +0000 UTC m=+48.870231443 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.373322 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.373380 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.373394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.373419 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.373435 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.476013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.476050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.476060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.476073 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.476084 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.583446 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.583513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.583661 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.583758 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.583789 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.687430 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.687760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.687821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.687844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.687870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.687893 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: E1128 15:20:01.687899 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.791324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.791366 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.791376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.791398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.791415 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.894336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.894401 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.894418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.894443 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.894461 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.998754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.998819 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.998838 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.998866 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:01 crc kubenswrapper[4884]: I1128 15:20:01.998885 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:01Z","lastTransitionTime":"2025-11-28T15:20:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.102390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.102459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.102480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.102510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.102531 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.204449 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.204495 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.204505 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.204520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.204533 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.307440 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.307509 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.307526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.307552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.307570 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.411002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.411053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.411067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.411119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.411136 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.514215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.514252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.514263 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.514281 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.514293 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.616801 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.616844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.616852 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.616870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.616881 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.688238 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.688319 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.688260 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:02 crc kubenswrapper[4884]: E1128 15:20:02.688411 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:20:02 crc kubenswrapper[4884]: E1128 15:20:02.688492 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:02 crc kubenswrapper[4884]: E1128 15:20:02.688628 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.719370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.719425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.719434 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.719450 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.719458 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.821604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.821670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.821744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.821775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.821794 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.925475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.925544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.925557 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.925579 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:02 crc kubenswrapper[4884]: I1128 15:20:02.925594 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:02Z","lastTransitionTime":"2025-11-28T15:20:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.028258 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.028388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.028402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.028418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.028428 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.130848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.130898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.130911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.130931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.130945 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.233137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.233194 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.233209 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.233228 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.233240 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.336740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.336798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.336809 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.336828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.336839 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.439989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.440051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.440065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.440085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.440464 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.546250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.546318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.546338 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.546375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.546394 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.648881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.648931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.648942 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.648958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.648968 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.687752 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:03 crc kubenswrapper[4884]: E1128 15:20:03.687938 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.751981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.752030 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.752041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.752058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.752070 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.855314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.855388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.855405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.855431 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.855449 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.958889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.958954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.958972 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.958997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:03 crc kubenswrapper[4884]: I1128 15:20:03.959016 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:03Z","lastTransitionTime":"2025-11-28T15:20:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.061536 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.061607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.061630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.061660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.061684 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.165016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.165131 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.165170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.165202 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.165225 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.268868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.268944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.268985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.269022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.269066 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.372396 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.372454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.372474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.372498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.372520 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.476170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.476231 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.476247 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.476276 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.476293 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.580410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.580482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.580504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.580537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.580560 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.683970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.684060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.684084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.684146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.684186 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.687381 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.687453 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.687389 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:04 crc kubenswrapper[4884]: E1128 15:20:04.687575 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:04 crc kubenswrapper[4884]: E1128 15:20:04.687667 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:04 crc kubenswrapper[4884]: E1128 15:20:04.687785 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.787109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.787137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.787147 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.787160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.787169 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.890479 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.890549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.890566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.890592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.890610 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.993862 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.993948 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.993999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.994034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:04 crc kubenswrapper[4884]: I1128 15:20:04.994063 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:04Z","lastTransitionTime":"2025-11-28T15:20:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.097300 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.097375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.097394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.097421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.097441 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.200641 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.200692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.200709 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.200732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.200750 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.303387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.303426 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.303435 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.303449 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.303458 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.407022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.407069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.407081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.407116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.407129 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.510233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.510298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.510322 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.510354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.510376 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.613754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.613825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.613849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.613881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.613903 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.687769 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:05 crc kubenswrapper[4884]: E1128 15:20:05.687975 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.689587 4884 scope.go:117] "RemoveContainer" containerID="83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0"
Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.714441 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.717056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.717118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.717133 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.717155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.717168 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.736924 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e
3cfb7e22238c2f60320edce0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.753308 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.781380 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.793465 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.807740 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.819518 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.819902 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.819940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.819956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.820021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.820048 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.833965 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.854217 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.867797 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 
15:20:05.883858 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.897719 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.914591 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.923222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.923277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.923290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.923310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.923320 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:05Z","lastTransitionTime":"2025-11-28T15:20:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.927728 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.944181 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.963853 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:05 crc kubenswrapper[4884]: I1128 15:20:05.980408 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:05Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.025905 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.025950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.025964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.025984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.025997 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.129160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.129204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.129215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.129234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.129246 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.237137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.237204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.237221 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.237250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.237274 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.340119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.340162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.340174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.340193 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.340204 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.442876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.442915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.442926 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.442943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.442955 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.546028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.546065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.546074 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.546108 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.546135 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.648342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.648407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.648430 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.648459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.648480 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.687962 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:06 crc kubenswrapper[4884]: E1128 15:20:06.688135 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.688218 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.688340 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:06 crc kubenswrapper[4884]: E1128 15:20:06.688369 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:06 crc kubenswrapper[4884]: E1128 15:20:06.688524 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.752039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.752081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.752114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.752132 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.752144 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.854780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.855041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.855249 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.855390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.855491 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.958465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.958763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.958828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.958915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:06 crc kubenswrapper[4884]: I1128 15:20:06.958979 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:06Z","lastTransitionTime":"2025-11-28T15:20:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.052652 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/1.log" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.054922 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.055308 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.060991 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.061019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.061027 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.061040 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.061049 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.070651 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.087309 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.099165 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.108964 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.121083 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.131260 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.141516 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.154580 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.163296 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.163424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.163482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.163553 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.163608 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.168784 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.189808 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f
19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.202146 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.221317 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07
b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.232539 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.244903 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.259589 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.266207 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.266256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.266268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.266286 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.266297 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.274627 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.285206 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.335191 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.346261 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.354337 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.367524 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.368994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.369034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.369071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.369155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.369212 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.380342 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.398975 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.412668 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.423815 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.435270 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.455662 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f
19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run 
ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.471111 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.471670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.471804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.471873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.471938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.472002 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.485631 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.502168 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.519820 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.530021 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.539754 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.554961 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.573131 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.574677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.574706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.574718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.574733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.574744 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.587390 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:07Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.678072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.678188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.678200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.678224 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.678242 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.687628 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:07 crc kubenswrapper[4884]: E1128 15:20:07.688026 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.781594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.781998 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.782203 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.782359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.782493 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.886470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.886578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.886597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.886623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.886643 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.989199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.989499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.989664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.989769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:07 crc kubenswrapper[4884]: I1128 15:20:07.989864 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:07Z","lastTransitionTime":"2025-11-28T15:20:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.060933 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/2.log" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.062540 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/1.log" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.066431 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69" exitCode=1 Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.066581 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.066717 4884 scope.go:117] "RemoveContainer" containerID="83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.067186 4884 scope.go:117] "RemoveContainer" containerID="d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69" Nov 28 15:20:08 crc kubenswrapper[4884]: E1128 15:20:08.067358 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.082707 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.092557 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.092610 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.092628 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.092653 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.092670 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.106123 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.120720 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.135879 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.154110 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.172686 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.192428 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.195256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.195288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.195298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.195313 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.195322 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.203249 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.217671 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.230527 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.253673 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.270506 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.284673 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.298248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.298294 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.298306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.298323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.298333 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.299412 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"sy
stem-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.324908 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f
19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://83ecc63f91368ebfd0f4f9537d671cf2a53aac9e3cfb7e22238c2f60320edce0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"ter]} options:{GoMap:map[requested-tnl-key:2 router-port:rtots-crc]} port_security:{GoSet:[]} tag_request:{GoSet:[]} type:router] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:8b3ef3d2-aa1a-4ff5-b390-b2bd8f0241f3}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {3cb9854d-2900-4fd0-baba-4bfcad667b19}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:19:52.388189 6320 obj_retry.go:551] Creating *v1.Node crc took: 1.882048588s\\\\nI1128 15:19:52.388239 6320 factory.go:1336] Added *v1.Node event handler 2\\\\nI1128 15:19:52.388278 6320 services_controller.go:189] Starting controller ovn-lb-controller for network=default\\\\nI1128 15:19:52.388327 6320 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 15:19:52.388516 6320 factory.go:656] Stopping watch factory\\\\nI1128 15:19:52.388522 6320 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 15:19:52.388540 6320 ovnkube.go:599] Stopped ovnkube\\\\nI1128 15:19:52.388649 6320 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 15:19:52.388788 6320 ovnkube.go:137] failed to run ov\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\
\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.337876 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.348304 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.363170 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:08Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.401239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.401307 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.401325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.401348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.401368 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.504735 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.504775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.504783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.504799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.504808 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.607584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.607633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.607645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.607663 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.607676 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.688016 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.688124 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:08 crc kubenswrapper[4884]: E1128 15:20:08.688259 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.688323 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:08 crc kubenswrapper[4884]: E1128 15:20:08.688384 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:08 crc kubenswrapper[4884]: E1128 15:20:08.688714 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.710411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.710453 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.710464 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.710483 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.710495 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.813584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.813645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.813664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.813688 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.813705 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.916755 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.916784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.916793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.916807 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:08 crc kubenswrapper[4884]: I1128 15:20:08.916818 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:08Z","lastTransitionTime":"2025-11-28T15:20:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.018981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.019028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.019042 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.019064 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.019080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.072481 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/2.log" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.076741 4884 scope.go:117] "RemoveContainer" containerID="d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69" Nov 28 15:20:09 crc kubenswrapper[4884]: E1128 15:20:09.076999 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.096562 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identi
ty-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.112641 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.122175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.122220 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.122234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.122256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.122271 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.127660 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.140075 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.155978 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.172250 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.192626 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: 
failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.205447 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.221320 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.224979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.225031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.225054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.225086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 
15:20:09.225147 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.250063 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f4
2928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.284322 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.309221 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.326502 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.327832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.327875 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc 
kubenswrapper[4884]: I1128 15:20:09.327887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.327906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.327919 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.338843 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 
28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.358286 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\
":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.377947 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.397894 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.401656 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:09 crc kubenswrapper[4884]: E1128 15:20:09.401937 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:09 crc kubenswrapper[4884]: E1128 15:20:09.402079 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:25.402047756 +0000 UTC m=+64.964831597 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.411883 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:09Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.431454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.431525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.431549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.431578 4884 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.431602 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.535166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.535226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.535238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.535257 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.535270 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.638070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.638142 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.638154 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.638172 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.638187 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.687587 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:09 crc kubenswrapper[4884]: E1128 15:20:09.687750 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.741483 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.741790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.741979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.742293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.742560 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.845500 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.845557 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.845574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.845597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.845614 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.948685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.948745 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.948764 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.948787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:09 crc kubenswrapper[4884]: I1128 15:20:09.948804 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:09Z","lastTransitionTime":"2025-11-28T15:20:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.052564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.052816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.052887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.052991 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.053058 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.156686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.156743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.156766 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.156796 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.156818 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.229575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.229643 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.229661 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.229686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.229705 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.255031 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.261994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.262062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.262078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.262127 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.262152 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.281344 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.286180 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.286235 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.286254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.286279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.286295 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.308631 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.314792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.314845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.314861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.314890 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.314904 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.334786 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.341252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.341316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.341330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.341353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.341419 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.354755 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.355430 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.362574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.362664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.362691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.362723 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.362746 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.465876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.465959 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.465971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.465993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.466008 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.568499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.568563 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.568580 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.568605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.568622 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.614816 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.614956 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.615816 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.615853 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:42.615832692 +0000 UTC m=+82.178616493 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.615951 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.616008 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:42.615994625 +0000 UTC m=+82.178778426 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.671741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.671782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.671790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.671803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.671812 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.687768 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.687822 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.687848 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.687992 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.688106 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.688239 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.700650 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.715350 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.716595 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.716750 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.716785 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:20:42.716760572 +0000 UTC m=+82.279544553 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.716822 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.716901 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.716931 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.716948 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.717008 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:42.716990677 +0000 UTC m=+82.279774668 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.717030 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.717057 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.717068 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:20:10 crc kubenswrapper[4884]: E1128 15:20:10.717141 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:42.71712484 +0000 UTC m=+82.279908631 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.730012 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.760030 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.772889 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.774645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.774679 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.774690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.774706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.774717 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.786748 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.801364 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.822792 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f
19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.839912 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.853692 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.869909 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.877644 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.877692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.877704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.877722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.877735 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.881755 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.895962 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.914023 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.928541 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.939381 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.951866 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.967211 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:10Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.980499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.980543 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.980559 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.980581 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:10 crc kubenswrapper[4884]: I1128 15:20:10.980598 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:10Z","lastTransitionTime":"2025-11-28T15:20:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.081953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.081997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.082006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.082022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.082032 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.185964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.186027 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.186040 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.186061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.186075 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.288403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.288450 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.288462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.288480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.288493 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.390841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.390885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.390894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.390911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.390921 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.493955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.494001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.494010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.494025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.494036 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.599452 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.599506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.599518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.599541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.599554 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.687388 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:11 crc kubenswrapper[4884]: E1128 15:20:11.687613 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.701828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.701897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.701907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.701966 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.701977 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.804696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.804769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.804790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.804822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.804841 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.907701 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.907751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.907760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.907774 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:11 crc kubenswrapper[4884]: I1128 15:20:11.907783 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:11Z","lastTransitionTime":"2025-11-28T15:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.010404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.010442 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.010451 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.010468 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.010479 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.113545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.113624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.113645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.113678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.113707 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.216653 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.216695 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.216706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.216725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.216736 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.319775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.319830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.319846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.319867 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.319880 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.421977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.422011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.422020 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.422035 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.422044 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.524465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.524515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.524527 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.524575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.524588 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.627318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.627356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.627365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.627381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.627391 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.688261 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.688340 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.688413 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:12 crc kubenswrapper[4884]: E1128 15:20:12.688452 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:12 crc kubenswrapper[4884]: E1128 15:20:12.688567 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:12 crc kubenswrapper[4884]: E1128 15:20:12.688686 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.730434 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.730495 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.730510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.730535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.730551 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.833301 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.833364 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.833382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.833408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.833426 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.936810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.936875 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.936887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.936909 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:12 crc kubenswrapper[4884]: I1128 15:20:12.936924 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:12Z","lastTransitionTime":"2025-11-28T15:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.039743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.039803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.039817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.039837 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.039850 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.143278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.143351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.143370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.143802 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.144017 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.246890 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.246933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.246945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.246962 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.246973 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.350874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.350928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.350939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.350958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.350973 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.453822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.453898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.453916 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.453939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.453953 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.556434 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.556471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.556480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.556495 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.556508 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.660102 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.660182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.660197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.660215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.660227 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.688158 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:13 crc kubenswrapper[4884]: E1128 15:20:13.688397 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.763547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.763595 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.763607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.763628 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.763643 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.867360 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.867415 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.867429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.867451 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.867462 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.970989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.971039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.971049 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.971068 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:13 crc kubenswrapper[4884]: I1128 15:20:13.971079 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:13Z","lastTransitionTime":"2025-11-28T15:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.074992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.075079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.075111 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.075135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.075148 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.179245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.179298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.179311 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.179350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.179368 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.282439 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.282484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.282494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.282512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.282523 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.385976 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.386051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.386077 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.386147 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.386174 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.489439 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.489943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.490046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.490183 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.490323 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.593754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.594204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.594424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.594583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.594719 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.688175 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.688186 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:14 crc kubenswrapper[4884]: E1128 15:20:14.688382 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.688312 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:14 crc kubenswrapper[4884]: E1128 15:20:14.688483 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:14 crc kubenswrapper[4884]: E1128 15:20:14.688758 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.697550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.697587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.697599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.697618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.697631 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.801462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.801531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.801552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.801584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.801606 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.905026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.905178 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.905198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.905238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:14 crc kubenswrapper[4884]: I1128 15:20:14.905257 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:14Z","lastTransitionTime":"2025-11-28T15:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.008808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.008872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.008889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.008912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.008932 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.112572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.112638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.112660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.112687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.112704 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.215842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.215897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.215916 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.215937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.215948 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.319679 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.319775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.319791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.319815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.319830 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.423686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.423750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.423768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.423793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.423812 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.526520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.526611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.526627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.526648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.526660 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.629793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.629844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.629853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.629869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.629878 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.688325 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:15 crc kubenswrapper[4884]: E1128 15:20:15.688536 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.732678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.732747 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.732769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.732796 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.732814 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.835769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.835820 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.835832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.835848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.835858 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.939461 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.939521 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.939531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.939551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:15 crc kubenswrapper[4884]: I1128 15:20:15.939563 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:15Z","lastTransitionTime":"2025-11-28T15:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.042734 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.042809 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.042817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.042836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.042847 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.145743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.145821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.145840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.145865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.145884 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.249560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.249637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.249656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.249687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.249705 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.352781 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.352855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.352888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.352918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.352939 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.455908 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.455970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.455982 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.456007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.456025 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.559498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.559558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.559571 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.559591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.559603 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.663179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.663242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.663259 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.663288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.663306 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.688302 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.688313 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:16 crc kubenswrapper[4884]: E1128 15:20:16.688587 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.688673 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:16 crc kubenswrapper[4884]: E1128 15:20:16.688817 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:16 crc kubenswrapper[4884]: E1128 15:20:16.689142 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.767076 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.767185 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.767211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.767243 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.767266 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.870797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.870860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.870882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.870912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.870933 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.974583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.974655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.974672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.974699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:16 crc kubenswrapper[4884]: I1128 15:20:16.974718 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:16Z","lastTransitionTime":"2025-11-28T15:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.078264 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.078346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.078364 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.078917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.078991 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.181525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.181580 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.181597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.181620 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.181637 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.284233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.284540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.284677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.284765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.284856 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.387585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.387917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.387995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.388124 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.388202 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.490790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.490846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.490861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.490881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.490899 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.593804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.593874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.593888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.593911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.593927 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.687463 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:17 crc kubenswrapper[4884]: E1128 15:20:17.687589 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.696668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.696729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.696745 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.696767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.696781 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.800128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.800188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.800204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.800227 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.800244 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.903809 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.903854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.903867 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.903885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:17 crc kubenswrapper[4884]: I1128 15:20:17.903899 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:17Z","lastTransitionTime":"2025-11-28T15:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.006381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.006435 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.006448 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.006469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.006482 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.109201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.109265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.109282 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.109304 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.109322 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.211743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.211786 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.211804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.211825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.211843 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.315032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.315227 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.315267 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.315310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.315324 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.417695 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.417767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.417791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.417822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.417846 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.520995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.521105 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.521125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.521159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.521176 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.624228 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.624292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.624310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.624336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.624355 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.691903 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.691978 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:18 crc kubenswrapper[4884]: E1128 15:20:18.692079 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.691903 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:18 crc kubenswrapper[4884]: E1128 15:20:18.692263 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:18 crc kubenswrapper[4884]: E1128 15:20:18.692421 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.727067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.727122 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.727133 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.727149 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.727162 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.830534 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.830604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.830623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.830656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.830677 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.934649 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.934744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.934770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.934812 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:18 crc kubenswrapper[4884]: I1128 15:20:18.934839 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:18Z","lastTransitionTime":"2025-11-28T15:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.037642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.037713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.037731 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.037760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.037781 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.141515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.141583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.141602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.141634 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.141660 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.245242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.245593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.245678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.245781 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.245875 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.349515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.349570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.349581 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.349600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.349615 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.452495 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.452964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.453214 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.453397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.453549 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.556983 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.557019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.557045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.557060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.557068 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.660955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.661069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.661142 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.661187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.661213 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.687362 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:19 crc kubenswrapper[4884]: E1128 15:20:19.687593 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.764408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.764589 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.764624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.764656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.764682 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.869111 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.869175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.869187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.869211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.869228 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.971992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.972047 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.972055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.972070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:19 crc kubenswrapper[4884]: I1128 15:20:19.972079 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:19Z","lastTransitionTime":"2025-11-28T15:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.074857 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.074935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.074957 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.074980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.074998 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.178203 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.178306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.178325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.178358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.178387 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.281704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.281767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.281785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.281812 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.281831 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.384932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.385003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.385023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.385056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.385076 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.488297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.488361 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.488377 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.488401 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.488418 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.503268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.503356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.503380 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.503412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.503435 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.525219 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 
2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.530492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.530571 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.530595 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.530623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.530644 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.549952 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 
2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.555503 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.555565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.555586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.555613 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.555631 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.578445 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 
2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.583317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.583378 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.583390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.583411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.583425 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.601731 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 
2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.606708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.606741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.606753 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.606770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.606779 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.621124 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 
2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.621357 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.623454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.623511 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.623534 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.623564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.623586 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.688016 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.688038 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.688226 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.688458 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.688661 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:20 crc kubenswrapper[4884]: E1128 15:20:20.688966 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.703624 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir
\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.724679 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID
\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.727691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.727764 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.727786 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.727816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.727837 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.742380 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.757461 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.771380 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.793251 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cer
t\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: 
failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.806317 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.819166 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534
f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.830672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.830894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.830974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.831046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.831141 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.833456 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.842919 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.853414 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.866080 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.877344 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.885323 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.898436 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.914790 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.928591 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.933760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.933803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.933818 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.933835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.933847 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:20Z","lastTransitionTime":"2025-11-28T15:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:20 crc kubenswrapper[4884]: I1128 15:20:20.941406 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:20Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.036006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 
15:20:21.036051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.036061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.036082 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.036115 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.138421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.138494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.138511 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.138539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.138560 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.241456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.241518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.241540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.241572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.241594 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.345618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.345680 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.345693 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.345717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.345728 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.448763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.448818 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.448834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.448855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.448871 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.551946 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.552010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.552026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.552045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.552056 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.655008 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.655085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.655128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.655158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.655179 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.687587 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:21 crc kubenswrapper[4884]: E1128 15:20:21.687763 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.758415 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.758448 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.758456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.758470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.758479 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.861172 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.861216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.861229 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.861247 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.861260 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.965678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.966382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.966481 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.966582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:21 crc kubenswrapper[4884]: I1128 15:20:21.966675 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:21Z","lastTransitionTime":"2025-11-28T15:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.069236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.069459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.069547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.069620 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.069681 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.173134 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.173391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.173470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.173601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.173657 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.276688 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.276738 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.276754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.276774 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.276792 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.380428 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.380496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.380514 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.380539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.380561 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.485480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.485547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.485586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.485619 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.485641 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.587655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.587698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.587707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.587722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.587731 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.687371 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.687478 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.687399 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:22 crc kubenswrapper[4884]: E1128 15:20:22.687575 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:22 crc kubenswrapper[4884]: E1128 15:20:22.687702 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:22 crc kubenswrapper[4884]: E1128 15:20:22.687791 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.689954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.690011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.690033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.690062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.690084 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.792950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.793000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.793011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.793029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.793040 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.896458 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.896532 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.896554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.896587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.896612 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.999748 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.999814 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.999825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.999847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:22 crc kubenswrapper[4884]: I1128 15:20:22.999859 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:22Z","lastTransitionTime":"2025-11-28T15:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.105880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.105934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.105948 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.105969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.105983 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.209026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.209068 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.209081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.209126 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.209141 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.311714 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.311742 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.311749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.311762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.311771 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.414325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.414370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.414385 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.414408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.414425 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.517629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.517677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.517692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.517717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.517732 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.621342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.621405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.621424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.621448 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.621465 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.688279 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:23 crc kubenswrapper[4884]: E1128 15:20:23.688437 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.724788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.724854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.724873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.724901 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.724920 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.827982 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.828041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.828057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.828082 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.828116 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.931548 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.931649 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.931675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.931717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:23 crc kubenswrapper[4884]: I1128 15:20:23.931742 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:23Z","lastTransitionTime":"2025-11-28T15:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.034003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.034059 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.034071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.034106 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.034121 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.136488 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.136537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.136549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.136569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.136580 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.239141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.239186 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.239196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.239217 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.239227 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.341834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.341877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.341889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.341904 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.341914 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.444327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.444370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.444382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.444397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.444408 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.546710 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.546755 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.546770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.546792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.546808 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.650081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.650138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.650148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.650164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.650173 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.688035 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:24 crc kubenswrapper[4884]: E1128 15:20:24.688192 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.688036 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:24 crc kubenswrapper[4884]: E1128 15:20:24.688371 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.688720 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:24 crc kubenswrapper[4884]: E1128 15:20:24.688806 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.689009 4884 scope.go:117] "RemoveContainer" containerID="d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69" Nov 28 15:20:24 crc kubenswrapper[4884]: E1128 15:20:24.689295 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.758651 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.758719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.758742 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.758773 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.759236 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.861745 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.861789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.861799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.861817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.861828 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.964554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.964600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.964611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.964630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:24 crc kubenswrapper[4884]: I1128 15:20:24.964641 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:24Z","lastTransitionTime":"2025-11-28T15:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.068526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.068585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.068602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.068626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.068645 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.171501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.171542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.171551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.171568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.171577 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.274468 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.274526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.274554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.274603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.274629 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.376642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.377051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.377174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.377271 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.377339 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.479565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.479930 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.480171 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.480600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.481006 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.483426 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:25 crc kubenswrapper[4884]: E1128 15:20:25.483540 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:25 crc kubenswrapper[4884]: E1128 15:20:25.483584 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:20:57.483570069 +0000 UTC m=+97.046353870 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.584488 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.584803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.584911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.585023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.585151 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.687244 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:25 crc kubenswrapper[4884]: E1128 15:20:25.687388 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.687958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.688019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.688030 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.688048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.688059 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.790414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.790456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.790467 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.790484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.790495 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.892573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.892620 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.892630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.892647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.892659 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.995516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.995855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.995952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.996050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:25 crc kubenswrapper[4884]: I1128 15:20:25.996148 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:25Z","lastTransitionTime":"2025-11-28T15:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.099353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.099397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.099407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.099427 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.099441 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.202351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.202389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.202399 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.202412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.202424 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.304406 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.304446 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.304459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.304475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.304486 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.406846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.406888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.406897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.406917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.406930 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.509424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.509487 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.509509 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.509539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.509561 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.612175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.612230 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.612242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.612261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.612272 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.688194 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.688261 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:26 crc kubenswrapper[4884]: E1128 15:20:26.688386 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.688576 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:26 crc kubenswrapper[4884]: E1128 15:20:26.688653 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:26 crc kubenswrapper[4884]: E1128 15:20:26.688869 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.715273 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.715330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.715347 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.715373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.715396 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.818352 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.818395 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.818404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.818419 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.818428 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.920826 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.920873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.920882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.920900 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:26 crc kubenswrapper[4884]: I1128 15:20:26.920912 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:26Z","lastTransitionTime":"2025-11-28T15:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.023947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.023996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.024005 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.024025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.024042 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.126238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.126294 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.126306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.126326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.126339 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.139522 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/0.log" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.139586 4884 generic.go:334] "Generic (PLEG): container finished" podID="237d188f-b799-4a82-bc67-c3a8fac5771f" containerID="732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6" exitCode=1 Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.139628 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerDied","Data":"732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.140264 4884 scope.go:117] "RemoveContainer" containerID="732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.159410 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.171839 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.189891 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.209238 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.223213 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.229681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.229974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.230046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.230161 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.230251 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.234060 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.256819 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.268034 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.278993 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.291623 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.301827 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 
15:20:27.314892 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.330059 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.332407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.332436 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.332445 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.332460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.332468 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.343819 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.353840 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.365743 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.378912 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.390395 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:27Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.435798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.435871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.435890 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.436322 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.436358 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.539363 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.539409 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.539421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.539438 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.539451 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.641491 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.641533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.641542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.641560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.641570 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.688440 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:27 crc kubenswrapper[4884]: E1128 15:20:27.688984 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.744824 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.744879 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.744892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.744914 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.744927 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.847588 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.847627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.847640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.847655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.847665 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.949985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.950021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.950033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.950052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:27 crc kubenswrapper[4884]: I1128 15:20:27.950064 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:27Z","lastTransitionTime":"2025-11-28T15:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.052352 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.052384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.052393 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.052406 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.052416 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.145280 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/0.log" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.145357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerStarted","Data":"b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58"} Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.154354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.154407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.154425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.154449 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.154466 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.161227 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.178164 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.194308 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 
15:20:28.210964 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.225400 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.241024 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.253186 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.257183 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.257215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.257223 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.257241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.257252 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.268766 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.288332 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.307401 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.326870 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.348278 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.360058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.360101 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.360111 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.360128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.360138 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.363258 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.376381 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.407712 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.425315 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.439220 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.456172 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:28Z is after 2025-08-24T17:21:41Z"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.462862 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.462910 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.462921 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.462938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.462949 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.565432 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.565488 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.565504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.565531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.565546 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.667755 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.667822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.667837 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.667864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.667879 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.688186 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.688232 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:28 crc kubenswrapper[4884]: E1128 15:20:28.688331 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.688421 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:28 crc kubenswrapper[4884]: E1128 15:20:28.688541 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:20:28 crc kubenswrapper[4884]: E1128 15:20:28.688735 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.770466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.770540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.770558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.770586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.770607 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.873859 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.873917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.873932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.873951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.873964 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.976733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.976809 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.976827 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.976851 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:28 crc kubenswrapper[4884]: I1128 15:20:28.976870 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:28Z","lastTransitionTime":"2025-11-28T15:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.079720 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.079779 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.079792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.079814 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.079827 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.182973 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.183045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.183069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.183146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.183176 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.287229 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.287694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.287906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.288155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.288395 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.391473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.391523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.391535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.391552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.391563 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.494167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.494218 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.494230 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.494256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.494275 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.597894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.597946 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.597958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.597978 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.597994 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.688168 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:29 crc kubenswrapper[4884]: E1128 15:20:29.688331 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.700157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.700219 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.700236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.700262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.700289 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.804546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.804599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.804617 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.804641 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.804658 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.908158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.908215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.908229 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.908254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:29 crc kubenswrapper[4884]: I1128 15:20:29.908271 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:29Z","lastTransitionTime":"2025-11-28T15:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.010958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.011056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.011074 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.011122 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.011139 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.113843 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.113913 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.113922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.113940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.113951 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.216459 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.216501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.216512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.216531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.216542 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.321220 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.321494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.321568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.321652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.321754 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.424552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.424596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.424605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.424621 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.424631 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.526807 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.526853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.526865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.526881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.526893 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.629860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.629911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.629924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.629944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.629960 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.687479 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.687565 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.687730 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.687802 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.687908 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.688019 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.690003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.690061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.690071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.690104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.690115 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.712488 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.713157 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.717659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.717767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.717778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.717811 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.717823 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.726697 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.731124 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4
c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.734830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.734870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.734907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.734925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.734938 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.743600 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.748426 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [node status payload omitted; byte-identical to the 15:20:30.731124 attempt above] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.751585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.751616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.751626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.751642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.751655 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.756649 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc 
kubenswrapper[4884]: E1128 15:20:30.768846 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [node status payload omitted; byte-identical to the 15:20:30.731124 attempt above] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.770452 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.772530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.772564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.772572 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.772587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.772597 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.783403 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: E1128 15:20:30.783590 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.784725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.784772 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.784783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.784800 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.784836 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.788361 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.799941 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.814060 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.826449 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.842637 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.854378 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.865039 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.875656 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.887496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.887530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.887541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.887556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.887567 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.895685 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.909885 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.920955 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.934692 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.950425 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:30Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.990547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.990602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.990622 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.990647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:30 crc kubenswrapper[4884]: I1128 15:20:30.990667 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:30Z","lastTransitionTime":"2025-11-28T15:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:31 crc kubenswrapper[4884]: I1128 15:20:31.093853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:31 crc kubenswrapper[4884]: I1128 15:20:31.093898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:31 crc kubenswrapper[4884]: I1128 15:20:31.093912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:31 crc kubenswrapper[4884]: I1128 15:20:31.093933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:31 crc kubenswrapper[4884]: I1128 15:20:31.093945 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:31Z","lastTransitionTime":"2025-11-28T15:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the five-entry block above (four "Recording event message for node" entries plus one "Node became not ready" entry) repeats with only the timestamps changing, roughly every 100 ms, from 15:20:31.196 through 15:20:34.707; those repeats are elided. The non-repeating entries from that window and the final occurrence of the block are kept below. ...]
Nov 28 15:20:31 crc kubenswrapper[4884]: I1128 15:20:31.687332 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:31 crc kubenswrapper[4884]: E1128 15:20:31.687714 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Has your network provider started?"} Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.687994 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.688079 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.688125 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:32 crc kubenswrapper[4884]: E1128 15:20:32.688167 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:32 crc kubenswrapper[4884]: E1128 15:20:32.688203 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:32 crc kubenswrapper[4884]: E1128 15:20:32.688264 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.740634 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.740683 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.740694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.740712 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.740727 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:32Z","lastTransitionTime":"2025-11-28T15:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.842968 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.843038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.843054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.843126 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.843144 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:32Z","lastTransitionTime":"2025-11-28T15:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.947013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.947646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.947741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.947841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:32 crc kubenswrapper[4884]: I1128 15:20:32.947929 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:32Z","lastTransitionTime":"2025-11-28T15:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.051236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.051287 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.051304 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.051329 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.051347 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.154382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.154797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.154955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.155136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.155265 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.259167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.259210 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.259224 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.259243 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.259252 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.362618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.362660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.362671 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.362687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.362699 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.465362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.465390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.465398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.465412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.465422 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.568164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.568230 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.568251 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.568315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.568337 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.670936 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.670999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.671017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.671043 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.671059 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.688310 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:33 crc kubenswrapper[4884]: E1128 15:20:33.688505 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.774004 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.774057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.774072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.774109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.774125 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.878061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.878169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.878194 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.878222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.878239 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.980951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.981022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.981044 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.981075 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:33 crc kubenswrapper[4884]: I1128 15:20:33.981140 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:33Z","lastTransitionTime":"2025-11-28T15:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.084316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.084450 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.084477 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.084510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.084534 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.187740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.188479 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.188690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.188887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.189130 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.291935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.292281 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.292518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.292845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.293043 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.398470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.398515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.398529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.398548 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.398563 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.501625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.501689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.501708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.501733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.501751 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.605708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.605767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.605787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.605816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.605832 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.687652 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:34 crc kubenswrapper[4884]: E1128 15:20:34.688041 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.687757 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.687750 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:34 crc kubenswrapper[4884]: E1128 15:20:34.688359 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:34 crc kubenswrapper[4884]: E1128 15:20:34.688569 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.707689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.707720 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.707732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.707746 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.707757 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.809890 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.809961 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.809980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.809997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.810009 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.913249 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.913303 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.913319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.913339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:34 crc kubenswrapper[4884]: I1128 15:20:34.913350 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:34Z","lastTransitionTime":"2025-11-28T15:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.017068 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.017285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.017303 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.017332 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.017354 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.121245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.121318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.121337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.121362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.121380 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.225054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.225109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.225119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.225139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.225157 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.327652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.327689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.327699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.327715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.327726 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.433412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.433719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.433884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.434028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.434195 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.538396 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.538466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.538484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.538513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.538532 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.641533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.641596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.641617 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.641645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.641667 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.687842 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:35 crc kubenswrapper[4884]: E1128 15:20:35.688153 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.744953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.744994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.745005 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.745064 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.745077 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.846771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.846808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.846821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.846836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.846848 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.948990 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.949029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.949049 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.949078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:35 crc kubenswrapper[4884]: I1128 15:20:35.949152 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:35Z","lastTransitionTime":"2025-11-28T15:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.052280 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.052355 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.052373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.052402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.052422 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.155335 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.155461 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.155480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.155549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.155566 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.257958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.258008 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.258020 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.258039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.258054 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.360924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.361001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.361019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.361038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.361048 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.463779 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.463822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.463836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.463853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.463863 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.566221 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.566278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.566324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.566343 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.566355 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.669444 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.669506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.669521 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.669540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.669557 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.688173 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.688254 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.688253 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:36 crc kubenswrapper[4884]: E1128 15:20:36.688727 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:36 crc kubenswrapper[4884]: E1128 15:20:36.688894 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:36 crc kubenswrapper[4884]: E1128 15:20:36.689034 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.689040 4884 scope.go:117] "RemoveContainer" containerID="d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.773097 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.773137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.773146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.773163 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.773172 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.876897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.876937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.876949 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.876966 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.876975 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.980189 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.980258 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.980277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.980298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:36 crc kubenswrapper[4884]: I1128 15:20:36.980318 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:36Z","lastTransitionTime":"2025-11-28T15:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.083729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.083767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.083777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.083792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.083802 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.175760 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/2.log" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.185996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.186030 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.186038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.186051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.186059 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.288847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.288887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.288896 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.288917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.288932 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.418917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.418960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.418973 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.418994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.419004 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.522029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.522123 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.522138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.522160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.522176 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.624114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.624156 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.624491 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.624528 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.624538 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.687541 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:37 crc kubenswrapper[4884]: E1128 15:20:37.687709 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.726147 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.726187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.726196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.726209 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.726219 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.828533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.828567 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.828608 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.828625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.828635 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.931342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.931400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.931414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.931435 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:37 crc kubenswrapper[4884]: I1128 15:20:37.931445 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:37Z","lastTransitionTime":"2025-11-28T15:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.033567 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.033605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.033617 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.033635 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.033647 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.136780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.136857 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.137261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.137316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.137335 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.183416 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/3.log" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.183987 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/2.log" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.186757 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005" exitCode=1 Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.186812 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005"} Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.186888 4884 scope.go:117] "RemoveContainer" containerID="d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.187922 4884 scope.go:117] "RemoveContainer" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005" Nov 28 15:20:38 crc kubenswrapper[4884]: E1128 15:20:38.188121 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.202383 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.219653 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.231673 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 
15:20:38.239963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.240017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.240031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.240050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.240061 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.243872 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.255669 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.268759 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.280151 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.291927 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.302519 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.311950 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.322522 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.339826 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.342396 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.342433 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.342444 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.342460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.342471 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.359247 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:38Z\\\",\\\"message\\\":\\\":true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.168:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {63b1440a-0908-4cab-8799-012fa1cf0b07}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:37.832614 6801 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1128 15:20:37.832623 6801 obj_retry.go:409] Going to retry *v1.Pod resource setup for 14 objects: [openshift-ovn-kubernetes/ovnkube-node-6wh6q openshift-dns/node-resolver-4kfcr openshift-multus/network-metrics-daemon-5nbz9 openshift-network-diagnostics/network-check-target-xd92c openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs openshift-multus/multus-additional-cni-plugins-kk24c openshift-multus/multus-zj27d openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-kbcrs openshift-kube-apiserver/kube-apiserver-crc openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc]\\\\nI1128 15:20:37.832613 6801 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer 
Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.377032 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.394507 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.413491 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.427182 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.437077 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:38Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.445113 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.445150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.445158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.445173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.445183 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.547070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.547138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.547153 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.547175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.547190 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.650121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.650164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.650175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.650191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.650203 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.687894 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:38 crc kubenswrapper[4884]: E1128 15:20:38.688083 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.688135 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:38 crc kubenswrapper[4884]: E1128 15:20:38.688215 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.688217 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:38 crc kubenswrapper[4884]: E1128 15:20:38.688479 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.752766 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.752835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.752847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.752867 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.752880 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.855684 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.855741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.855752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.855771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.855785 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.958715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.958756 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.958765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.958780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:38 crc kubenswrapper[4884]: I1128 15:20:38.958789 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:38Z","lastTransitionTime":"2025-11-28T15:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.060995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.061032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.061041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.061055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.061066 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.164170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.164225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.164239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.164258 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.164276 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.192491 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/3.log"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.267690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.267763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.267785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.267830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.267872 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.371637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.371679 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.371688 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.371705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.371715 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.474660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.474715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.474726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.474742 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.474752 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.577054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.577146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.577163 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.577189 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.577212 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.679738 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.679780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.679788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.679804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.679815 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.687979 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:39 crc kubenswrapper[4884]: E1128 15:20:39.688151 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.782301 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.782340 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.782348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.782367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.782377 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.884124 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.884155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.884163 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.884199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.884209 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.986932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.986976 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.986986 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.987000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:39 crc kubenswrapper[4884]: I1128 15:20:39.987009 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:39Z","lastTransitionTime":"2025-11-28T15:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.089206 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.089277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.089299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.089326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.089346 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.192587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.192661 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.192685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.192712 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.192733 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.295216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.295270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.295285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.295309 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.295325 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.398381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.398418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.398427 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.398460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.398469 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.500796 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.500844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.500856 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.500873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.500885 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.602956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.603003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.603018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.603039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.603051 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.687814 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.687814 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:40 crc kubenswrapper[4884]: E1128 15:20:40.688035 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.687841 4884 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:40 crc kubenswrapper[4884]: E1128 15:20:40.688173 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:40 crc kubenswrapper[4884]: E1128 15:20:40.688214 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.703788 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.705719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.705769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.705778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.705793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.705803 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.721426 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.733363 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.746661 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.757074 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.767698 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.779754 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.793580 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.804519 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.808007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.808057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.808072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.808112 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.808129 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.813849 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.824374 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.837574 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.860549 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d0b7f4bcd1ccf7700fa34ddc93dd8edf1609943f19f3bdd7fe0b56adfdc93e69\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"message\\\":\\\", EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.161\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1128 15:20:07.330836 6496 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-kube-apiserver-operator/metrics]} name:Service_openshift-kube-apiserver-operator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.109:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {1f62a432-33b9-495d-83b2-d1dbe6961325}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:07.330946 6496 services_controller.go:356] Processing sync for service openshift-authentication/oauth-openshift for network=default\\\\nF1128 15:20:07.329861 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:38Z\\\",\\\"message\\\":\\\":true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.168:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {63b1440a-0908-4cab-8799-012fa1cf0b07}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:37.832614 6801 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1128 15:20:37.832623 6801 obj_retry.go:409] Going to retry *v1.Pod resource setup for 14 objects: [openshift-ovn-kubernetes/ovnkube-node-6wh6q openshift-dns/node-resolver-4kfcr openshift-multus/network-metrics-daemon-5nbz9 openshift-network-diagnostics/network-check-target-xd92c openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs 
openshift-multus/multus-additional-cni-plugins-kk24c openshift-multus/multus-zj27d openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-kbcrs openshift-kube-apiserver/kube-apiserver-crc openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc]\\\\nI1128 15:20:37.832613 6801 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node
-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.872708 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.883806 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.901803 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.910725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.910798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.910824 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.910857 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.910882 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:40Z","lastTransitionTime":"2025-11-28T15:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.913166 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:40 crc kubenswrapper[4884]: I1128 15:20:40.924181 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:40Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.013599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.013659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.013670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.013689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.013700 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.054390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.054456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.054476 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.054496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.054506 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.067685 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.071912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.071960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.071973 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.071991 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.072003 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.083382 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.087475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.087552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.087562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.087576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.087585 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.102901 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.107453 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.107502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.107516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.107537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.107552 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.121251 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.126734 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.126789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.126804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.126821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.126831 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.142615 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:41Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.142731 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.144281 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.144302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.144310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.144323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.144332 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.248315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.248356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.248369 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.248387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.248402 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.350887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.350938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.350947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.350962 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.350974 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.453941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.454001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.454011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.454030 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.454044 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.556717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.556777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.556790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.556810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.556822 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.660325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.660388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.660398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.660417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.660437 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.687739 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:41 crc kubenswrapper[4884]: E1128 15:20:41.687923 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.763815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.763878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.763893 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.763918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:41 crc kubenswrapper[4884]: I1128 15:20:41.763934 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:41Z","lastTransitionTime":"2025-11-28T15:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
...
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.084998 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.085041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.085050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.085067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.085076 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:42Z","lastTransitionTime":"2025-11-28T15:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
...
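Every entry in the block above reduces to a single condition: the kubelet keeps the node NotReady because the container runtime reports NetworkReady=false, and it does so because no CNI configuration file exists under /etc/kubernetes/cni/net.d/. A minimal sketch of that verdict, assuming the conventional CNI layout (the directory path is taken from the log; the extension list and all names here are illustrative, not the kubelet's own code):

// cnicheck.go - hedged sketch: report NetworkReady the way the message
// above implies, by scanning the CNI conf dir for a usable config file.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // path reported in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("NetworkReady=false: cannot read %s: %v\n", confDir, err)
		return
	}
	for _, e := range entries {
		// *.conf, *.conflist and *.json are the conventional CNI config names.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Printf("NetworkReady=true: found %s\n", e.Name())
			return
		}
	}
	fmt.Printf("NetworkReady=false: no CNI configuration file in %s/\n", confDir)
}

The condition clears on its own once the network provider (here OVN-Kubernetes, whose ovnkube-controller is crash-looping later in this log) writes its config into that directory; until then the kubelet re-records the same NotReady heartbeat roughly every 100 ms.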
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.599863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.599951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.599978 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.600272 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.600318 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:42Z","lastTransitionTime":"2025-11-28T15:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.670949 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.671043 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.671164 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.671223 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:21:46.671205714 +0000 UTC m=+146.233989515 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.671289 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.671400 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 15:21:46.671376688 +0000 UTC m=+146.234160689 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.688130 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.688231 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.688261 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.688475 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.688615 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.688829 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.702173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.702235 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.702244 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.702261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.702272 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:42Z","lastTransitionTime":"2025-11-28T15:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.771870 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.772016 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:42 crc kubenswrapper[4884]: I1128 15:20:42.772057 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772131 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:46.772081686 +0000 UTC m=+146.334865497 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772220 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772256 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772271 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772274 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772295 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772307 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772334 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 15:21:46.772310882 +0000 UTC m=+146.335094813 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 15:20:42 crc kubenswrapper[4884]: E1128 15:20:42.772357 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 15:21:46.772350123 +0000 UTC m=+146.335134154 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
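The failed volume operations above are all parked with "No retries permitted until ... (durationBeforeRetry 1m4s)": the next attempt is scheduled 64 s out rather than immediately. 64 s is 0.5 s doubled seven times, consistent with a capped per-operation exponential backoff; the sketch below is a hedged reconstruction of that schedule (initial delay, factor, and cap are assumptions, not the kubelet's exact constants):

// retrybackoff.go - hedged sketch of the doubling delay suggested by
// "durationBeforeRetry 1m4s" in the entries above.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // assumed initial delay
	const factor = 2                // assumed growth factor
	maxDelay := 2 * time.Minute     // assumed cap
	for attempt := 1; attempt <= 9; attempt++ {
		fmt.Printf("attempt %d: durationBeforeRetry %v\n", attempt, delay)
		delay *= factor
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}

Attempt 8 of this schedule prints 1m4s, matching the log. The m=+146.23 offsets are the kubelet's monotonic uptime: the operations failed around m=+82 s and are rescheduled for m=+146 s, 64 s later.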
...
Nov 28 15:20:43 crc kubenswrapper[4884]: I1128 15:20:43.011535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:43 crc kubenswrapper[4884]: I1128 15:20:43.011575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:43 crc kubenswrapper[4884]: I1128 15:20:43.011584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:43 crc kubenswrapper[4884]: I1128 15:20:43.011599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:43 crc kubenswrapper[4884]: I1128 15:20:43.011610 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:43Z","lastTransitionTime":"2025-11-28T15:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
...
Nov 28 15:20:43 crc kubenswrapper[4884]: I1128 15:20:43.688375 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:43 crc kubenswrapper[4884]: E1128 15:20:43.688573 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
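The paired "No sandbox for pod can be found" / "Error syncing pod, skipping" entries show where the readiness condition bites: before starting a new sandbox the kubelet consults the runtime's network status and skips the sync while NetworkReady=false, except for host-network pods, which is why only these CNI-attached pods stall. A hedged sketch of that gate (function and parameter names invented for illustration, not the kubelet's API):

// sandboxgate.go - hedged sketch of the sync gate implied by the entries
// above: pods needing CNI are skipped while the runtime network is down;
// host-network pods are allowed through.
package main

import (
	"errors"
	"fmt"
)

var errNetworkNotReady = errors.New(
	"network is not ready: container runtime network not ready: NetworkReady=false")

func syncPod(pod string, hostNetwork, networkReady bool) error {
	if !networkReady && !hostNetwork {
		return fmt.Errorf("error syncing pod %q, skipping: %w", pod, errNetworkNotReady)
	}
	// Network is usable (or irrelevant): a new sandbox could be started here.
	fmt.Printf("starting new sandbox for %s\n", pod)
	return nil
}

func main() {
	if err := syncPod("openshift-multus/network-metrics-daemon-5nbz9", false, false); err != nil {
		fmt.Println(err) // mirrors the E1128 "Error syncing pod" entries
	}
	_ = syncPod("openshift-kube-apiserver/kube-apiserver-crc", true, false) // host network: proceeds
}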
...
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.044295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.044376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.044394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.044418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.044436 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:44Z","lastTransitionTime":"2025-11-28T15:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
...
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.687999 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.688039 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:20:44 crc kubenswrapper[4884]: I1128 15:20:44.688133 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:20:44 crc kubenswrapper[4884]: E1128 15:20:44.688212 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:20:44 crc kubenswrapper[4884]: E1128 15:20:44.688310 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:20:44 crc kubenswrapper[4884]: E1128 15:20:44.688640 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
...
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.077689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.077737 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.077749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.077769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.077779 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
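Each entry above shares the same two-part header: a journald prefix (timestamp, host, unit[pid]) followed by a klog header (severity letter, MMDD, wall-clock time, PID, source file:line) and the structured message. When post-processing a capture like this one, a regexp fitted to these samples is enough to slice the fields (a hedged sketch, not klog's full grammar):

// logfields.go - hedged sketch: split one of the kubelet entries above
// into its journald and klog fields. The pattern is fitted to these
// sample lines only.
package main

import (
	"fmt"
	"regexp"
)

var entryRE = regexp.MustCompile(
	`^(\w+ +\d+ [\d:]+) (\S+) (\S+)\[(\d+)\]: ([IWEF])(\d{4}) ([\d:.]+) +(\d+) ([^\]]+)\] (.+)$`)

func main() {
	line := `Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.077769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"`
	m := entryRE.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Printf("host=%s unit=%s severity=%s time=%s source=%s\nmsg=%s\n",
		m[2], m[3], m[5], m[7], m[9], m[10])
}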
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.115557 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q"
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.116546 4884 scope.go:117] "RemoveContainer" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005"
Nov 28 15:20:45 crc kubenswrapper[4884]: E1128 15:20:45.116735 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd"
Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.132620 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.147960 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.159028 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 
15:20:45.179802 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":
\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.181403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.181469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.181484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.181512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.181529 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.196666 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.215022 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.227422 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.241507 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.255683 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.269012 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.287895 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.287925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.287933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.287948 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.287957 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.295998 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:38Z\\\",\\\"message\\\":\\\":true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.168:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {63b1440a-0908-4cab-8799-012fa1cf0b07}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:37.832614 6801 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1128 15:20:37.832623 6801 obj_retry.go:409] Going to retry *v1.Pod resource setup for 14 objects: [openshift-ovn-kubernetes/ovnkube-node-6wh6q openshift-dns/node-resolver-4kfcr openshift-multus/network-metrics-daemon-5nbz9 openshift-network-diagnostics/network-check-target-xd92c openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs openshift-multus/multus-additional-cni-plugins-kk24c openshift-multus/multus-zj27d openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-kbcrs openshift-kube-apiserver/kube-apiserver-crc openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc]\\\\nI1128 15:20:37.832613 6801 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.313843 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.331038 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.360892 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.372843 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.384489 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.390849 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.390902 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.390915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.390933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.390946 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.397003 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.408210 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:45Z is after 2025-08-24T17:21:41Z" Nov 28 
15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.494153 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.494210 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.494228 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.494254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.494276 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.597067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.597207 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.597239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.597270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.597292 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.687924 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:45 crc kubenswrapper[4884]: E1128 15:20:45.688118 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.699777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.699863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.699876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.699905 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.699921 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.802870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.802947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.802971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.803001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.803026 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.906038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.906155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.906179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.906209 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:45 crc kubenswrapper[4884]: I1128 15:20:45.906231 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:45Z","lastTransitionTime":"2025-11-28T15:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.009371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.009437 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.009453 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.009478 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.009495 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.113071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.113121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.113132 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.113148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.113159 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.215941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.215975 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.215988 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.216005 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.216016 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.318347 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.318392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.318410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.318431 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.318448 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.420569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.420640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.420663 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.420693 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.420715 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.523797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.523899 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.523922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.523953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.523974 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.627383 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.627443 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.627470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.627514 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.627538 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.687780 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:46 crc kubenswrapper[4884]: E1128 15:20:46.688039 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.688406 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.688473 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:46 crc kubenswrapper[4884]: E1128 15:20:46.689328 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:46 crc kubenswrapper[4884]: E1128 15:20:46.689643 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.709549 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.731074 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.731173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.731191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.731216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.731235 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.834355 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.834410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.834423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.834445 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.834462 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.937843 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.937944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.937967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.937996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:46 crc kubenswrapper[4884]: I1128 15:20:46.938019 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:46Z","lastTransitionTime":"2025-11-28T15:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.041433 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.041485 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.041496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.041515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.041526 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.145392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.145448 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.145460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.145500 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.145518 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.247743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.247789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.247802 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.247823 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.247837 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.351468 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.351516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.351529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.351550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.351563 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.453956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.454010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.454020 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.454041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.454053 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.557602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.557651 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.557671 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.557691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.557703 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.660664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.660707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.660722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.660744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.660758 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.687948 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:47 crc kubenswrapper[4884]: E1128 15:20:47.688209 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.763948 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.764012 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.764025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.764049 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.764064 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.867199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.867238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.867248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.867265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.867285 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.970872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.970933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.970950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.970985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:47 crc kubenswrapper[4884]: I1128 15:20:47.971004 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:47Z","lastTransitionTime":"2025-11-28T15:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.075165 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.075303 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.075316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.075337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.075349 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.178024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.178061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.178069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.178082 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.178120 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.281441 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.281503 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.281513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.281532 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.281544 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.385229 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.385299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.385314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.385341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.385355 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.488025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.488141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.488164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.488191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.488211 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.590957 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.591007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.591017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.591038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.591050 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.688030 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.688163 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:48 crc kubenswrapper[4884]: E1128 15:20:48.688218 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.688288 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:48 crc kubenswrapper[4884]: E1128 15:20:48.688415 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:48 crc kubenswrapper[4884]: E1128 15:20:48.688557 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.693851 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.693893 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.693906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.693922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.693934 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.796544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.796589 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.796599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.796616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.796629 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.899743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.899813 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.899824 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.899845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:48 crc kubenswrapper[4884]: I1128 15:20:48.899858 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:48Z","lastTransitionTime":"2025-11-28T15:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.003143 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.003179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.003188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.003204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.003214 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.105761 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.105808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.105822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.105841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.105852 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.209581 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.209660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.209681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.209716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.209736 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.312262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.312330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.312343 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.312365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.312379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.415324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.415385 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.415403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.415427 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.415446 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.518381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.518423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.518436 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.518453 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.518466 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.621648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.621704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.621716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.621734 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.621744 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.687766 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:49 crc kubenswrapper[4884]: E1128 15:20:49.687943 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.723846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.723884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.723901 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.723917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.723929 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.826790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.826844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.826862 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.826886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.826903 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.930460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.930524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.930542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.930567 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:49 crc kubenswrapper[4884]: I1128 15:20:49.930585 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:49Z","lastTransitionTime":"2025-11-28T15:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.037331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.037483 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.037525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.037565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.037609 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.140818 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.140856 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.140864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.140880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.140891 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.243601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.243668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.243692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.243720 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.243742 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.347020 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.347123 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.347144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.347174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.347191 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.449924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.449977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.449998 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.450260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.450280 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.553523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.553606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.553634 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.553673 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.553699 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.656639 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.656687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.656697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.656716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.656727 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.688214 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.688226 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:50 crc kubenswrapper[4884]: E1128 15:20:50.688400 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.688420 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:50 crc kubenswrapper[4884]: E1128 15:20:50.688560 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:50 crc kubenswrapper[4884]: E1128 15:20:50.688666 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.705985 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.719483 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-4kfcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"552dad10-9ef9-4e53-ba05-00b44ae3a499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2f76f8cd8a869194f43b0f7a7e159cb1b9e4bae54a3a4d00e4f74f0a001f763\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w4mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-4kfcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.734332 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df18773-f995-4e08-bcd0-81350c8d83ae\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 15:19:32.960810 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 15:19:32.965350 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-675970110/tls.crt::/tmp/serving-cert-675970110/tls.key\\\\\\\"\\\\nI1128 15:19:38.351421 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 15:19:38.353429 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 15:19:38.353453 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 15:19:38.353482 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 15:19:38.353487 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 15:19:38.365813 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 15:19:38.365851 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365862 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 15:19:38.365869 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 15:19:38.365875 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 15:19:38.365880 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 15:19:38.365885 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 15:19:38.365894 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 15:19:38.371818 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.755518 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://307f00f408b471a1812aff3d23903854b698984c89db08463747e290b93ff903\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.765836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.766192 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.766392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.766487 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.766530 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.778155 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:53Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fvq78\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:53Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-5nbz9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.792887 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b4bd8eaf090e8f382e009a81db31d2362e165ad7fa7703ed505399c66642b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://065f13b18c44c0a4e4cf99358ceec22e9778914dcf3afac6a02b78a3be12bca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.814116 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.828415 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19d5b2cf-5ea1-402e-9891-465d8f82be2b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93c012b0fce62892e81d8576b0db93e4fff7f483a0d997510d98f8a6229b7dbd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://034222a541bb814f61f69a88ef4436badaa1f0b78a689afae08d00083006231f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://034222a541bb814f61f69a88ef4436badaa1f0b78a689afae08d00083006231f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.847941 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"afc6a39f-97f9-4846-9f6d-853c99d9f6b1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64fc103c977b1e021d7c4d0010ea771c8b6c198c346d22adc4f0e65bafcc1184\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dab5fd95bc201189deb8e36c548e0f9c0caf8513c1eddc1abb5577fb77540134\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://557ed289e07e1c1a0fc3b8bdae5e77f30b21807db1e5b6bd13e37e35090a0f47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://461726973adfb9ab9049b656d520891dccdb9c5
cf913b334f298afddda10fc53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc7986ed4a2b46844f284cc714d4fa38422184bea0f0ad7818770ee4fb09b31b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f60d876dd3be08d4076ae00d7af40819851516ea57026daf6135d66e46dbd86\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cd5e0a55fc981cc83ce21951bc7c9a72793c42d07175074f697b1f271a54661d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ef6fd10796a944a086402039da62d535f52292628e9cd8b3dd09b0b678a1dc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.861106 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://822280fb9b5fdeb4a84478edd9bc8d3534538de7a7e4abb1ac93b7c8b933fafe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.869379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.869418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.869430 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.869449 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.869462 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.874557 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"120c26c6-4231-418f-a5af-738dc44915f8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa7b893a86763348abd27462ffa6f24307e3a9ef79b4f3b102101d9071ec07f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tvt9v\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-pwcbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.888995 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-zj27d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"237d188f-b799-4a82-bc67-c3a8fac5771f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:26Z\\\",\\\"message\\\":\\\"2025-11-28T15:19:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b\\\\n2025-11-28T15:19:40+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_cb3c2744-2beb-4e9b-ae12-3e9f81d0483b to /host/opt/cni/bin/\\\\n2025-11-28T15:19:41Z [verbose] multus-daemon started\\\\n2025-11-28T15:19:41Z [verbose] Readiness Indicator file check\\\\n2025-11-28T15:20:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:20:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c95sm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-zj27d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.907738 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d1b77432-5316-4dd6-a4a9-f74651377bdd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T15:20:38Z\\\",\\\"message\\\":\\\":true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.168:8443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {63b1440a-0908-4cab-8799-012fa1cf0b07}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 15:20:37.832614 6801 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI1128 15:20:37.832623 6801 obj_retry.go:409] Going to retry *v1.Pod resource setup for 14 objects: [openshift-ovn-kubernetes/ovnkube-node-6wh6q openshift-dns/node-resolver-4kfcr openshift-multus/network-metrics-daemon-5nbz9 openshift-network-diagnostics/network-check-target-xd92c openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs openshift-multus/multus-additional-cni-plugins-kk24c openshift-multus/multus-zj27d openshift-etcd/etcd-crc openshift-network-diagnostics/network-check-source-55646444c4-trplf openshift-network-operator/network-operator-58b4c7f79c-55gtf openshift-image-registry/node-ca-kbcrs openshift-kube-apiserver/kube-apiserver-crc openshift-kube-controller-manager/kube-controller-manager-crc openshift-kube-scheduler/openshift-kube-scheduler-crc]\\\\nI1128 15:20:37.832613 6801 transact.go:42] Configuring OVN: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T15:20:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-frpmx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-6wh6q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.920789 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"723c8d1a-74a9-49e0-8bc1-f9dcd0de9d99\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4918fb62d9942f2ffa711e8ab0cf609e3bc65b8a28aef3467e56ca0d1846555f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f368535a1f7d3fb6cf63cef6568526f0af26b447874480a9b6430ee027dbf0e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9d8edae459b3b72cc19e4be8894b700190b78b0a7aed8fb1886aaf6005360201\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.932416 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"44d44185-89a4-4c11-8239-b3815e054aca\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b28bf6d6abf74e72b2697d0a80c4fa6762e1171a5bbd3d1fd8d4bfedfeed5611\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b8df621f7175690bfb2013eb50e22fbabf3752c7f2e8f3baa776b0a58de79086\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9f8e83d7b32b14efcc8193b51fce5582cde093e1f342e271a53609c80cf6a0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f355a210834e72983fdac925147a3b3968471acdf8d5f66a023d71486ad0446\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.943651 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ac0e409-0044-4f7e-b4f0-565171a4ff9a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33416f808918bee6e658242b05f7b9cdba051177786b6d97e8ccc9154eb6c560\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://077628fb85534f5e3e0bd65ed9f1545b48a8f0d4d5070147ebb79ad669be0ad7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-nmxj6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:52Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-glcqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.955342 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-kbcrs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b10879b-1f31-4575-823c-8f39cd85978d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://43df98194ad3de85d7a7d82cc33c8afb5b1569fabf7a0549543d8264d812fc18\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r9vdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-kbcrs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.969228 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:38Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.972923 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.972981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.973000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.973027 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.973045 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:50Z","lastTransitionTime":"2025-11-28T15:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:50 crc kubenswrapper[4884]: I1128 15:20:50.988413 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-kk24c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4a16f171-34b0-4f03-80dc-3b9bbd459bcc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T15:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6a63366c5078b12c1e0e1bc42d4fd5f5d149b906ba3f7efac7a11abb876562c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T15:19:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c68a94d8873c73ebaf6373571867147237239c750924448f66305dbd75db29d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://14b584af7eab04f70c96e777b993bf64dd43fa02715e048d99918c6b76c09e3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d735f142ef0b218dbc2df6401a88f3f12220d69d5bc35038f79961ac70841e74\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f0f92c31d78eb5c49dd38325d80c0bf5c02632629c4bbecf7162f23114c6608c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f928b0f68daae11dbd44e237509fe105dee29c0ca1b5a54bb565451cbee4b925\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9c82c87ae02f7d71db1e5f0e07240326ea4be8aab3cff0cab8fd4fd623eb2f3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T15:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T15:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g8b7w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T15:19:39Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-kk24c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:50Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.076486 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.076525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.076537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.076555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.076567 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.180048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.180128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.180145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.180169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.180186 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.282984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.283035 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.283054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.283075 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.283133 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.370339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.370414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.370429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.370448 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.370463 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: E1128 15:20:51.385986 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.390704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.390761 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.390779 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.390806 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.390823 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.414638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.414700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.414718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.414747 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.414768 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.445417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.445481 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.445506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.445537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.445561 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.467016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.467312 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.467474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.467633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.467771 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: E1128 15:20:51.489543 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T15:20:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"80fb0112-20e3-4d9a-9db2-f4ba712ce894\\\",\\\"systemUUID\\\":\\\"4c40ba81-7d04-41d3-b14d-2c4a4505250b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T15:20:51Z is after 2025-08-24T17:21:41Z" Nov 28 15:20:51 crc kubenswrapper[4884]: E1128 15:20:51.489794 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.491978 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.492022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.492036 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.492055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.492066 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.594882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.594951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.594972 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.594997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.595014 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.688330 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:51 crc kubenswrapper[4884]: E1128 15:20:51.688519 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.697825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.697897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.697921 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.697946 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.697968 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.801488 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.801556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.801638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.801718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.801747 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.905181 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.905233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.905256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.905284 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:51 crc kubenswrapper[4884]: I1128 15:20:51.905305 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:51Z","lastTransitionTime":"2025-11-28T15:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.008659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.008723 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.008740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.008764 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.008781 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.112743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.112789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.112804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.112836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.112853 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.215277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.215330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.215344 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.215365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.215379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.318571 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.318896 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.318974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.319075 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.319196 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.422286 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.422323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.422334 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.422350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.422359 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.525017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.525069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.525081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.525117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.525130 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.628705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.628789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.628808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.628836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.628853 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.688065 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:52 crc kubenswrapper[4884]: E1128 15:20:52.688489 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.688544 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.688560 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:52 crc kubenswrapper[4884]: E1128 15:20:52.688734 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:52 crc kubenswrapper[4884]: E1128 15:20:52.688865 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.731510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.731564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.731576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.731594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.731607 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.834740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.834799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.834850 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.834878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.834895 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.937733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.937803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.937844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.937878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:52 crc kubenswrapper[4884]: I1128 15:20:52.937900 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:52Z","lastTransitionTime":"2025-11-28T15:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.041044 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.041116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.041128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.041146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.041159 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.143668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.143706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.143717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.143733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.143747 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.246430 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.246484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.246498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.246517 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.246528 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.349476 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.349531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.349546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.349568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.349584 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.457351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.457426 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.457469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.457518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.457549 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.561756 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.561818 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.561842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.561874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.561899 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.665902 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.665953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.665964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.665981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.665993 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.688266 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:53 crc kubenswrapper[4884]: E1128 15:20:53.688466 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.768983 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.769074 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.769137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.769170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.769193 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.872615 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.872666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.872679 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.872698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.872709 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.975740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.975795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.975812 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.975836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:53 crc kubenswrapper[4884]: I1128 15:20:53.975852 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:53Z","lastTransitionTime":"2025-11-28T15:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.078876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.078917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.078927 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.078943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.078953 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.182548 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.182603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.182616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.182637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.182654 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.286112 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.286148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.286157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.286173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.286183 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.388637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.388704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.388722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.388750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.388771 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.491824 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.491928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.491945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.491970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.491986 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.594877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.594925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.594939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.594960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.594974 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.688000 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.688037 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:54 crc kubenswrapper[4884]: E1128 15:20:54.688262 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.688329 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:54 crc kubenswrapper[4884]: E1128 15:20:54.688469 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:54 crc kubenswrapper[4884]: E1128 15:20:54.688684 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.697471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.697538 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.697560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.697587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.697608 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.800865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.800947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.800973 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.801002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.801024 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.903870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.903906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.903915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.903935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:54 crc kubenswrapper[4884]: I1128 15:20:54.903946 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:54Z","lastTransitionTime":"2025-11-28T15:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.008268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.008319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.008336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.008361 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.008379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.110970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.111023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.111035 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.111058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.111072 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.214719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.214795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.214816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.214847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.214868 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.317442 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.317508 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.317533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.317562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.317584 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.420865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.420954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.420967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.420994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.421009 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.524471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.524525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.524542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.524568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.524581 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.627492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.627554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.627565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.627585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.627597 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.688359 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:55 crc kubenswrapper[4884]: E1128 15:20:55.688524 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.730384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.730445 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.730462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.730486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.730503 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.833954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.834032 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.834056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.834135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.834164 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.937138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.937219 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.937234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.937252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:55 crc kubenswrapper[4884]: I1128 15:20:55.937265 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:55Z","lastTransitionTime":"2025-11-28T15:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.039770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.039821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.039838 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.039868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.039886 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.143328 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.143390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.143412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.143442 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.143466 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.246337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.246411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.246434 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.246466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.246490 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.349291 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.349349 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.349361 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.349381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.349395 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.453245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.453296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.453306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.453323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.453334 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.558506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.558572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.558590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.558619 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.558639 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.661830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.661887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.661897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.661925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.661938 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.687480 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.687545 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.687545 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:56 crc kubenswrapper[4884]: E1128 15:20:56.687705 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:56 crc kubenswrapper[4884]: E1128 15:20:56.687906 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:56 crc kubenswrapper[4884]: E1128 15:20:56.688151 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.765369 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.765509 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.765753 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.765785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.765806 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.868453 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.868529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.868548 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.868575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.868595 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.972326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.972414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.972438 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.972468 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:56 crc kubenswrapper[4884]: I1128 15:20:56.972486 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:56Z","lastTransitionTime":"2025-11-28T15:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.075545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.075612 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.075638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.075670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.075691 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.179072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.179156 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.179169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.179186 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.179200 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.282502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.282566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.282590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.282622 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.282644 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.385970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.386037 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.386055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.386081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.386136 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.489481 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.489545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.489561 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.489586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.489604 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.537500 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:57 crc kubenswrapper[4884]: E1128 15:20:57.537683 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:57 crc kubenswrapper[4884]: E1128 15:20:57.537789 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs podName:104ccc20-6082-4bdc-bdc7-591fa0b2b2d4 nodeName:}" failed. No retries permitted until 2025-11-28 15:22:01.537761701 +0000 UTC m=+161.100545532 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs") pod "network-metrics-daemon-5nbz9" (UID: "104ccc20-6082-4bdc-bdc7-591fa0b2b2d4") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.592205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.592265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.592282 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.592307 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.592326 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.687995 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:20:57 crc kubenswrapper[4884]: E1128 15:20:57.688235 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.695520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.695551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.695560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.695573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.695582 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.798901 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.799036 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.799191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.799244 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.799269 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.901751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.901845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.901881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.901914 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:57 crc kubenswrapper[4884]: I1128 15:20:57.901941 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:57Z","lastTransitionTime":"2025-11-28T15:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.004613 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.004667 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.004682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.004704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.004727 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.107678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.107709 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.107718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.107733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.107743 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.210750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.210822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.210842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.210870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.210893 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.313683 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.313765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.313798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.313832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.313855 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.422140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.422236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.422260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.422295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.422387 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.525288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.525356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.525379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.525408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.525429 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.628749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.628822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.628846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.628877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.628899 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.687363 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.687486 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:20:58 crc kubenswrapper[4884]: E1128 15:20:58.687563 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.687486 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:20:58 crc kubenswrapper[4884]: E1128 15:20:58.687680 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:20:58 crc kubenswrapper[4884]: E1128 15:20:58.687906 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.688998 4884 scope.go:117] "RemoveContainer" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005" Nov 28 15:20:58 crc kubenswrapper[4884]: E1128 15:20:58.689277 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.731485 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.731545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.731558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.731583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 15:20:58 crc kubenswrapper[4884]: I1128 15:20:58.731598 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T15:20:58Z","lastTransitionTime":"2025-11-28T15:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
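Every NotReady heartbeat above traces back to the same root cause: kubelet found no CNI network configuration in /etc/kubernetes/cni/net.d/. For orientation, a CNI config is a small JSON file in that directory. The sketch below is a minimal generic example in the upstream CNI format; the network name, bridge name, and subnet are illustrative only and are not what this cluster uses (here the real file would be written by the OVN-Kubernetes/Multus components once ovnkube-node stops crash-looping):

    {
      "cniVersion": "0.4.0",
      "name": "example-net",
      "type": "bridge",
      "bridge": "cni-example0",
      "isGateway": true,
      "ipMasq": true,
      "ipam": {
        "type": "host-local",
        "subnet": "10.88.0.0/16"
      }
    }

Until some such file appears in that directory, kubelet keeps reporting NetworkReady=false and refuses to start sandboxes for pods that need pod networking, which is exactly the loop recorded above.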
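The ovnkube-controller entry above reports "back-off 40s restarting failed container". As a sketch of how that delay grows, assuming kubelet's usual crash-loop back-off parameters (10s initial delay, doubled after each consecutive crash, capped at 5 minutes; only the 40s figure actually appears in this log):

    # Sketch of CrashLoopBackOff delay growth. BASE_S and CAP_S are assumed
    # kubelet defaults, not values read from this log.
    BASE_S, CAP_S = 10, 300

    def backoff_after(crashes: int) -> int:
        """Back-off in seconds after `crashes` consecutive failures."""
        return min(BASE_S * 2 ** (crashes - 1), CAP_S)

    print([backoff_after(n) for n in range(1, 8)])
    # [10, 20, 40, 80, 160, 300, 300] -- 40s corresponds to the third crash

On that reading, the 40s back-off suggests ovnkube-controller has already failed roughly three times in a row at this point in the log.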
Nov 28 15:20:59 crc kubenswrapper[4884]: I1128 15:20:59.687959 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:20:59 crc kubenswrapper[4884]: E1128 15:20:59.688221 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.688372 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.688428 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 15:21:00 crc kubenswrapper[4884]: E1128 15:21:00.688585 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.688632 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:21:00 crc kubenswrapper[4884]: E1128 15:21:00.689051 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 15:21:00 crc kubenswrapper[4884]: E1128 15:21:00.689299 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.749053 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-4kfcr" podStartSLOduration=82.749024918 podStartE2EDuration="1m22.749024918s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.748967267 +0000 UTC m=+100.311751078" watchObservedRunningTime="2025-11-28 15:21:00.749024918 +0000 UTC m=+100.311808759"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.782281 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=82.782256053 podStartE2EDuration="1m22.782256053s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.768072453 +0000 UTC m=+100.330856294" watchObservedRunningTime="2025-11-28 15:21:00.782256053 +0000 UTC m=+100.345039854"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.842415 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=53.84238726 podStartE2EDuration="53.84238726s" podCreationTimestamp="2025-11-28 15:20:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.82943546 +0000 UTC m=+100.392219271" watchObservedRunningTime="2025-11-28 15:21:00.84238726 +0000 UTC m=+100.405171101"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.842899 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=14.842885942 podStartE2EDuration="14.842885942s" podCreationTimestamp="2025-11-28 15:20:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.842153834 +0000 UTC m=+100.404937665" watchObservedRunningTime="2025-11-28 15:21:00.842885942 +0000 UTC m=+100.405669783"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.870202 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=81.870168964 podStartE2EDuration="1m21.870168964s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.868697929 +0000 UTC m=+100.431481740" watchObservedRunningTime="2025-11-28 15:21:00.870168964 +0000 UTC m=+100.432952815"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.899865 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podStartSLOduration=82.899846474 podStartE2EDuration="1m22.899846474s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.899506476 +0000 UTC m=+100.462290277" watchObservedRunningTime="2025-11-28 15:21:00.899846474 +0000 UTC m=+100.462630275"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.914969 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-zj27d" podStartSLOduration=82.914955125 podStartE2EDuration="1m22.914955125s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.914447163 +0000 UTC m=+100.477230994" watchObservedRunningTime="2025-11-28 15:21:00.914955125 +0000 UTC m=+100.477738926"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.954343 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=82.954322516 podStartE2EDuration="1m22.954322516s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.953897176 +0000 UTC m=+100.516680997" watchObservedRunningTime="2025-11-28 15:21:00.954322516 +0000 UTC m=+100.517106317"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.966597 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-glcqs" podStartSLOduration=81.966572349 podStartE2EDuration="1m21.966572349s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.965589385 +0000 UTC m=+100.528373236" watchObservedRunningTime="2025-11-28 15:21:00.966572349 +0000 UTC m=+100.529356160"
Nov 28 15:21:00 crc kubenswrapper[4884]: I1128 15:21:00.986756 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-kk24c" podStartSLOduration=82.986732242 podStartE2EDuration="1m22.986732242s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.986317411 +0000 UTC m=+100.549101242" watchObservedRunningTime="2025-11-28 15:21:00.986732242 +0000 UTC m=+100.549516043"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.000473 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-kbcrs" podStartSLOduration=83.000450679 podStartE2EDuration="1m23.000450679s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:00.999663621 +0000 UTC m=+100.562447422" watchObservedRunningTime="2025-11-28 15:21:01.000450679 +0000 UTC m=+100.563234480"
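Each startup-latency entry above carries two clocks: a wall-clock timestamp and an m=+<seconds> offset on the kubelet's monotonic clock, counted from process start. Subtracting the offset from the wall time recovers when kubelet itself started; a small worked example using the node-resolver-4kfcr values copied from the log:

    from datetime import datetime, timedelta, timezone

    # Values copied from the node-resolver-4kfcr entry above.
    observed = datetime(2025, 11, 28, 15, 21, 0, 748967, tzinfo=timezone.utc)
    offset = timedelta(seconds=100.311751078)  # the m=+100.311751078 part

    print((observed - offset).isoformat())
    # 2025-11-28T15:19:20.437216+00:00 -- the kubelet start time, and the same
    # value falls out of every entry in this run

podStartSLOduration and podStartE2EDuration agree for these pods because both pull timestamps are the zero value (0001-01-01), i.e. no image pull time was recorded for them.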
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.687325 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9"
Nov 28 15:21:01 crc kubenswrapper[4884]: E1128 15:21:01.687513 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.808663 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"]
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.809538 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.811927 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.812071 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.813883 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.813903 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.886588 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/53852ba6-a6a2-47d8-b22c-21986807145b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.886650 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/53852ba6-a6a2-47d8-b22c-21986807145b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.886692 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/53852ba6-a6a2-47d8-b22c-21986807145b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.886778 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53852ba6-a6a2-47d8-b22c-21986807145b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.886836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/53852ba6-a6a2-47d8-b22c-21986807145b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.987900 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/53852ba6-a6a2-47d8-b22c-21986807145b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.987960 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/53852ba6-a6a2-47d8-b22c-21986807145b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.988015 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/53852ba6-a6a2-47d8-b22c-21986807145b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.988194 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53852ba6-a6a2-47d8-b22c-21986807145b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.988241 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/53852ba6-a6a2-47d8-b22c-21986807145b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.988359 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/53852ba6-a6a2-47d8-b22c-21986807145b-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.988700 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/53852ba6-a6a2-47d8-b22c-21986807145b-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:01 crc kubenswrapper[4884]: I1128 15:21:01.989977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/53852ba6-a6a2-47d8-b22c-21986807145b-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.001030 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/53852ba6-a6a2-47d8-b22c-21986807145b-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.016332 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/53852ba6-a6a2-47d8-b22c-21986807145b-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p4glf\" (UID: \"53852ba6-a6a2-47d8-b22c-21986807145b\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.134692 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf"
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.273481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf" event={"ID":"53852ba6-a6a2-47d8-b22c-21986807145b","Type":"ContainerStarted","Data":"001d15c953bcca109ceda3c928ff95acd03419f0e4268e6dee31c39579c98ebc"}
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.688410 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.688417 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 15:21:02 crc kubenswrapper[4884]: I1128 15:21:02.688883 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:02 crc kubenswrapper[4884]: E1128 15:21:02.689134 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:02 crc kubenswrapper[4884]: E1128 15:21:02.689509 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:03 crc kubenswrapper[4884]: I1128 15:21:03.278558 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf" event={"ID":"53852ba6-a6a2-47d8-b22c-21986807145b","Type":"ContainerStarted","Data":"4e8bd306a2aa45292eca588dd0d7b72c2e6c0de98bf4b3319052e5ea953f77d5"} Nov 28 15:21:03 crc kubenswrapper[4884]: I1128 15:21:03.294893 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4glf" podStartSLOduration=85.294870545 podStartE2EDuration="1m25.294870545s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:03.29464308 +0000 UTC m=+102.857426911" watchObservedRunningTime="2025-11-28 15:21:03.294870545 +0000 UTC m=+102.857654356" Nov 28 15:21:03 crc kubenswrapper[4884]: I1128 15:21:03.687281 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:03 crc kubenswrapper[4884]: E1128 15:21:03.687456 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:04 crc kubenswrapper[4884]: I1128 15:21:04.687653 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:04 crc kubenswrapper[4884]: I1128 15:21:04.687783 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:04 crc kubenswrapper[4884]: E1128 15:21:04.688361 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:04 crc kubenswrapper[4884]: E1128 15:21:04.688496 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:04 crc kubenswrapper[4884]: I1128 15:21:04.687841 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:04 crc kubenswrapper[4884]: E1128 15:21:04.688592 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:05 crc kubenswrapper[4884]: I1128 15:21:05.687749 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:05 crc kubenswrapper[4884]: E1128 15:21:05.688725 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:06 crc kubenswrapper[4884]: I1128 15:21:06.687776 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:06 crc kubenswrapper[4884]: E1128 15:21:06.687981 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:06 crc kubenswrapper[4884]: I1128 15:21:06.688018 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:06 crc kubenswrapper[4884]: E1128 15:21:06.688265 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:06 crc kubenswrapper[4884]: I1128 15:21:06.688917 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:06 crc kubenswrapper[4884]: E1128 15:21:06.689217 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:07 crc kubenswrapper[4884]: I1128 15:21:07.687399 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:07 crc kubenswrapper[4884]: E1128 15:21:07.687693 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:08 crc kubenswrapper[4884]: I1128 15:21:08.688132 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:08 crc kubenswrapper[4884]: E1128 15:21:08.688835 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:08 crc kubenswrapper[4884]: I1128 15:21:08.688208 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:08 crc kubenswrapper[4884]: E1128 15:21:08.689203 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:08 crc kubenswrapper[4884]: I1128 15:21:08.688143 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:08 crc kubenswrapper[4884]: E1128 15:21:08.689569 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:09 crc kubenswrapper[4884]: I1128 15:21:09.688183 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:09 crc kubenswrapper[4884]: E1128 15:21:09.688323 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:10 crc kubenswrapper[4884]: I1128 15:21:10.690395 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:10 crc kubenswrapper[4884]: I1128 15:21:10.690427 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:10 crc kubenswrapper[4884]: I1128 15:21:10.690449 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:10 crc kubenswrapper[4884]: I1128 15:21:10.691066 4884 scope.go:117] "RemoveContainer" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005" Nov 28 15:21:10 crc kubenswrapper[4884]: E1128 15:21:10.691295 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-6wh6q_openshift-ovn-kubernetes(d1b77432-5316-4dd6-a4a9-f74651377bdd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" Nov 28 15:21:10 crc kubenswrapper[4884]: E1128 15:21:10.691424 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:10 crc kubenswrapper[4884]: E1128 15:21:10.691498 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:10 crc kubenswrapper[4884]: E1128 15:21:10.691550 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:11 crc kubenswrapper[4884]: I1128 15:21:11.688143 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:11 crc kubenswrapper[4884]: E1128 15:21:11.688777 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:12 crc kubenswrapper[4884]: I1128 15:21:12.687933 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:12 crc kubenswrapper[4884]: I1128 15:21:12.687966 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:12 crc kubenswrapper[4884]: I1128 15:21:12.688049 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:12 crc kubenswrapper[4884]: E1128 15:21:12.688253 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:12 crc kubenswrapper[4884]: E1128 15:21:12.688408 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:12 crc kubenswrapper[4884]: E1128 15:21:12.688586 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.314146 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/1.log" Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.315301 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/0.log" Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.315377 4884 generic.go:334] "Generic (PLEG): container finished" podID="237d188f-b799-4a82-bc67-c3a8fac5771f" containerID="b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58" exitCode=1 Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.315420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerDied","Data":"b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58"} Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.315466 4884 scope.go:117] "RemoveContainer" containerID="732d48e41b9a517cc0911fd2f5c8a92631f5644c870c4dbcda2a6aa4dce4e0e6" Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.316245 4884 scope.go:117] "RemoveContainer" containerID="b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58" Nov 28 15:21:13 crc kubenswrapper[4884]: E1128 15:21:13.316584 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-zj27d_openshift-multus(237d188f-b799-4a82-bc67-c3a8fac5771f)\"" pod="openshift-multus/multus-zj27d" podUID="237d188f-b799-4a82-bc67-c3a8fac5771f" Nov 28 15:21:13 crc kubenswrapper[4884]: I1128 15:21:13.687989 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:13 crc kubenswrapper[4884]: E1128 15:21:13.688230 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:14 crc kubenswrapper[4884]: I1128 15:21:14.321312 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/1.log" Nov 28 15:21:14 crc kubenswrapper[4884]: I1128 15:21:14.687794 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:14 crc kubenswrapper[4884]: I1128 15:21:14.688050 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:14 crc kubenswrapper[4884]: I1128 15:21:14.688080 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:14 crc kubenswrapper[4884]: E1128 15:21:14.688301 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:14 crc kubenswrapper[4884]: E1128 15:21:14.688385 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:14 crc kubenswrapper[4884]: E1128 15:21:14.688472 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:15 crc kubenswrapper[4884]: I1128 15:21:15.688148 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:15 crc kubenswrapper[4884]: E1128 15:21:15.688511 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:16 crc kubenswrapper[4884]: I1128 15:21:16.688237 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:16 crc kubenswrapper[4884]: I1128 15:21:16.688257 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:16 crc kubenswrapper[4884]: I1128 15:21:16.688482 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:16 crc kubenswrapper[4884]: E1128 15:21:16.688689 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:16 crc kubenswrapper[4884]: E1128 15:21:16.689211 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:16 crc kubenswrapper[4884]: E1128 15:21:16.689432 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:17 crc kubenswrapper[4884]: I1128 15:21:17.687271 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:17 crc kubenswrapper[4884]: E1128 15:21:17.687502 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:18 crc kubenswrapper[4884]: I1128 15:21:18.688172 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:18 crc kubenswrapper[4884]: I1128 15:21:18.688260 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:18 crc kubenswrapper[4884]: I1128 15:21:18.688260 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:18 crc kubenswrapper[4884]: E1128 15:21:18.688378 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:18 crc kubenswrapper[4884]: E1128 15:21:18.688589 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:18 crc kubenswrapper[4884]: E1128 15:21:18.688784 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:19 crc kubenswrapper[4884]: I1128 15:21:19.687449 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:19 crc kubenswrapper[4884]: E1128 15:21:19.687699 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:20 crc kubenswrapper[4884]: E1128 15:21:20.677509 4884 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 15:21:20 crc kubenswrapper[4884]: I1128 15:21:20.687898 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:20 crc kubenswrapper[4884]: I1128 15:21:20.687999 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:20 crc kubenswrapper[4884]: I1128 15:21:20.688133 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:20 crc kubenswrapper[4884]: E1128 15:21:20.689659 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:20 crc kubenswrapper[4884]: E1128 15:21:20.689777 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:20 crc kubenswrapper[4884]: E1128 15:21:20.689940 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:20 crc kubenswrapper[4884]: E1128 15:21:20.798876 4884 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:21:21 crc kubenswrapper[4884]: I1128 15:21:21.688268 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:21 crc kubenswrapper[4884]: E1128 15:21:21.688485 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:22 crc kubenswrapper[4884]: I1128 15:21:22.688155 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:22 crc kubenswrapper[4884]: I1128 15:21:22.688217 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:22 crc kubenswrapper[4884]: I1128 15:21:22.688267 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:22 crc kubenswrapper[4884]: E1128 15:21:22.688391 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:22 crc kubenswrapper[4884]: E1128 15:21:22.688581 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:22 crc kubenswrapper[4884]: E1128 15:21:22.688966 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:23 crc kubenswrapper[4884]: I1128 15:21:23.687396 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:23 crc kubenswrapper[4884]: E1128 15:21:23.687771 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:24 crc kubenswrapper[4884]: I1128 15:21:24.687686 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:24 crc kubenswrapper[4884]: I1128 15:21:24.688181 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:24 crc kubenswrapper[4884]: E1128 15:21:24.688155 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:24 crc kubenswrapper[4884]: I1128 15:21:24.688290 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:24 crc kubenswrapper[4884]: E1128 15:21:24.688481 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:24 crc kubenswrapper[4884]: E1128 15:21:24.688648 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:24 crc kubenswrapper[4884]: I1128 15:21:24.689871 4884 scope.go:117] "RemoveContainer" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005" Nov 28 15:21:25 crc kubenswrapper[4884]: I1128 15:21:25.366337 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/3.log" Nov 28 15:21:25 crc kubenswrapper[4884]: I1128 15:21:25.368968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerStarted","Data":"f3401938bf7dbfdd67d846882d2e44239a96c28d4a82bbf37d9d50fd4dc57f2f"} Nov 28 15:21:25 crc kubenswrapper[4884]: I1128 15:21:25.369452 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:21:25 crc kubenswrapper[4884]: I1128 15:21:25.394620 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podStartSLOduration=107.394604831 podStartE2EDuration="1m47.394604831s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:25.393478664 +0000 UTC m=+124.956262465" watchObservedRunningTime="2025-11-28 15:21:25.394604831 +0000 UTC m=+124.957388622" Nov 28 15:21:25 crc kubenswrapper[4884]: I1128 15:21:25.587414 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-5nbz9"] Nov 28 15:21:25 crc kubenswrapper[4884]: I1128 15:21:25.587937 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:25 crc kubenswrapper[4884]: E1128 15:21:25.588193 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:25 crc kubenswrapper[4884]: E1128 15:21:25.801413 4884 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:21:26 crc kubenswrapper[4884]: I1128 15:21:26.687759 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:26 crc kubenswrapper[4884]: I1128 15:21:26.687857 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:26 crc kubenswrapper[4884]: I1128 15:21:26.687857 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:26 crc kubenswrapper[4884]: E1128 15:21:26.687984 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:26 crc kubenswrapper[4884]: I1128 15:21:26.688159 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:26 crc kubenswrapper[4884]: E1128 15:21:26.688319 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:26 crc kubenswrapper[4884]: E1128 15:21:26.688430 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:26 crc kubenswrapper[4884]: E1128 15:21:26.688676 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:26 crc kubenswrapper[4884]: I1128 15:21:26.688889 4884 scope.go:117] "RemoveContainer" containerID="b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58" Nov 28 15:21:27 crc kubenswrapper[4884]: I1128 15:21:27.378393 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/1.log" Nov 28 15:21:27 crc kubenswrapper[4884]: I1128 15:21:27.378471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerStarted","Data":"80eb534a7e5eab5045722acb46a009f657d4036e304cfda4b74740290f476161"} Nov 28 15:21:28 crc kubenswrapper[4884]: I1128 15:21:28.688176 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:28 crc kubenswrapper[4884]: I1128 15:21:28.688176 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:28 crc kubenswrapper[4884]: E1128 15:21:28.688575 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:28 crc kubenswrapper[4884]: I1128 15:21:28.688302 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:28 crc kubenswrapper[4884]: E1128 15:21:28.688707 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:28 crc kubenswrapper[4884]: I1128 15:21:28.688275 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:28 crc kubenswrapper[4884]: E1128 15:21:28.688804 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:28 crc kubenswrapper[4884]: E1128 15:21:28.688863 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:30 crc kubenswrapper[4884]: I1128 15:21:30.689444 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:30 crc kubenswrapper[4884]: I1128 15:21:30.689459 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:30 crc kubenswrapper[4884]: I1128 15:21:30.689532 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:30 crc kubenswrapper[4884]: I1128 15:21:30.689662 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:30 crc kubenswrapper[4884]: E1128 15:21:30.691255 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 15:21:30 crc kubenswrapper[4884]: E1128 15:21:30.691403 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 15:21:30 crc kubenswrapper[4884]: E1128 15:21:30.691522 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 15:21:30 crc kubenswrapper[4884]: E1128 15:21:30.691598 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-5nbz9" podUID="104ccc20-6082-4bdc-bdc7-591fa0b2b2d4" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.171662 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.221233 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-66tg2"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.222242 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.222326 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2k4vg"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.223151 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.232885 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6tbrh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.233893 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.235450 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.236514 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.237791 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.238552 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.240177 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-qzhng"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.245787 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.248260 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.248426 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.253583 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.263909 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.264323 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.267568 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.267917 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4449t"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.269933 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.270175 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.270366 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.270545 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.270642 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.270734 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.270823 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.270932 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271040 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271157 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271270 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271408 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271498 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271654 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271781 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.271870 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.272023 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.272764 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.273217 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.273530 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.273815 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.274105 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.276446 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.276784 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: 
I1128 15:21:32.277021 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.277467 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.277690 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.277548 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.278513 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z9r54"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.278914 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.279222 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.279290 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.279957 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.280563 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.281037 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.281177 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.281316 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.281498 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.281611 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.282193 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.283747 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-tqjn2"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.284201 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.284513 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-gxgjt"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.284928 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286587 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-pnpw5"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-etcd-serving-ca\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/298783da-21dc-4586-bfd0-b157b74ce8f7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-client-ca\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286926 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286956 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-audit\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.286987 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lvq6\" (UniqueName: \"kubernetes.io/projected/298783da-21dc-4586-bfd0-b157b74ce8f7-kube-api-access-4lvq6\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.287014 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-config\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.287040 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/837a51ee-4dbf-439d-9249-d49cd09a0585-audit-dir\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.287062 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-etcd-client\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296217 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296314 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-encryption-config\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-config\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 
15:21:32.296391 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-auth-proxy-config\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296413 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl9fs\" (UniqueName: \"kubernetes.io/projected/837a51ee-4dbf-439d-9249-d49cd09a0585-kube-api-access-gl9fs\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296461 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b074b05e-c5dd-4818-8808-b6207aff3514-config\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296482 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-image-import-ca\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296504 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-config\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296527 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wncj2\" (UniqueName: \"kubernetes.io/projected/9051d787-06db-42b2-846a-231f40dc737c-kube-api-access-wncj2\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296555 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b074b05e-c5dd-4818-8808-b6207aff3514-images\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296582 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/837a51ee-4dbf-439d-9249-d49cd09a0585-node-pullsecrets\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296604 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-trusted-ca-bundle\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296628 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9051d787-06db-42b2-846a-231f40dc737c-serving-cert\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296650 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-machine-approver-tls\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296671 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45gjx\" (UniqueName: \"kubernetes.io/projected/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-kube-api-access-45gjx\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296713 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b074b05e-c5dd-4818-8808-b6207aff3514-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296762 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drtzv\" (UniqueName: \"kubernetes.io/projected/b074b05e-c5dd-4818-8808-b6207aff3514-kube-api-access-drtzv\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296785 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-serving-cert\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.296837 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/298783da-21dc-4586-bfd0-b157b74ce8f7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.300369 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.300611 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.301348 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.302486 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.302618 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.302741 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.302849 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.302976 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.303077 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.303206 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.303386 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.303850 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.309731 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.310345 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311264 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311301 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311457 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311581 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311650 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311715 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311756 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.311833 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312240 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312245 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312365 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312464 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312537 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312547 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.312624 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313243 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313348 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.313420 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313490 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313581 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313667 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313732 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313794 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313865 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.313928 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.314050 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.314201 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.317941 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-2m5q7"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.318419 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2k4vg"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.318435 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6tbrh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.318504 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.325667 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.328950 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.329178 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.329559 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.333180 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.333460 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.333697 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.334500 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.334616 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.338552 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.353935 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.354234 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.354899 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ls7t4"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.355330 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.354905 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.354945 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.355015 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.355056 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.355187 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.356804 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-66tg2"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.357461 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.358121 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.358224 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.359451 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.359947 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.361036 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-vcmc8"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.362615 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.362788 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.363536 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.363784 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.364082 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.364317 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.364439 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.365416 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.365717 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.367262 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.367700 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-rhppm"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.367833 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.368082 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.368411 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.371777 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.374202 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9frzk"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.374411 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.374437 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.374585 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.374925 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.376888 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.378394 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.378440 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.378552 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.378902 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.379866 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-whh5g"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.386720 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.395166 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.395615 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397158 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397653 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b074b05e-c5dd-4818-8808-b6207aff3514-config\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397695 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-image-import-ca\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397720 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl9fs\" (UniqueName: \"kubernetes.io/projected/837a51ee-4dbf-439d-9249-d49cd09a0585-kube-api-access-gl9fs\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ae95c6-76f6-4d84-b306-7ab053006feb-serving-cert\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnmgz\" (UniqueName: \"kubernetes.io/projected/88c4733c-f3d7-4718-a865-bd4b9b510fbe-kube-api-access-wnmgz\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397804 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-config\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397826 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t775\" (UniqueName: \"kubernetes.io/projected/a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5-kube-api-access-7t775\") pod \"downloads-7954f5f757-2m5q7\" (UID: \"a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5\") " pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397852 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wncj2\" (UniqueName: \"kubernetes.io/projected/9051d787-06db-42b2-846a-231f40dc737c-kube-api-access-wncj2\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397875 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397899 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397920 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm56v\" (UniqueName: \"kubernetes.io/projected/06ab8887-8ada-45d6-a104-ab5732219eeb-kube-api-access-cm56v\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397940 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-serving-cert\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397958 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7181d6c4-803a-4a92-869c-0f7a69724cb1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qxzxh\" (UID: \"7181d6c4-803a-4a92-869c-0f7a69724cb1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.397975 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/88c4733c-f3d7-4718-a865-bd4b9b510fbe-available-featuregates\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.397995 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b074b05e-c5dd-4818-8808-b6207aff3514-images\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398017 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/837a51ee-4dbf-439d-9249-d49cd09a0585-node-pullsecrets\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398057 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-trusted-ca-bundle\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398078 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ae95c6-76f6-4d84-b306-7ab053006feb-config\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398115 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-policies\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398138 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-machine-approver-tls\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398162 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9051d787-06db-42b2-846a-231f40dc737c-serving-cert\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398189 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-audit-policies\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhd4j\" (UniqueName: \"kubernetes.io/projected/c658ccdf-b838-446f-857b-3acd10099f88-kube-api-access-hhd4j\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398270 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/837a51ee-4dbf-439d-9249-d49cd09a0585-node-pullsecrets\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398365 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b074b05e-c5dd-4818-8808-b6207aff3514-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398619 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b074b05e-c5dd-4818-8808-b6207aff3514-config\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398715 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-config\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398831 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45gjx\" (UniqueName: \"kubernetes.io/projected/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-kube-api-access-45gjx\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-trusted-ca-bundle\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.398991 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" 
Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.399130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-config\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.399247 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drtzv\" (UniqueName: \"kubernetes.io/projected/b074b05e-c5dd-4818-8808-b6207aff3514-kube-api-access-drtzv\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.399273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-serving-cert\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.399361 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b074b05e-c5dd-4818-8808-b6207aff3514-images\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.399486 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400539 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/298783da-21dc-4586-bfd0-b157b74ce8f7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400677 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-config\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400708 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-ca\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400749 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzf9w\" 
(UniqueName: \"kubernetes.io/projected/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-kube-api-access-vzf9w\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400817 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/06ab8887-8ada-45d6-a104-ab5732219eeb-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400904 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-etcd-serving-ca\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.400991 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50b0307c-e145-43ae-b97a-207ff99980a5-serving-cert\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.401019 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.401064 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/298783da-21dc-4586-bfd0-b157b74ce8f7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.401136 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lvq6\" (UniqueName: \"kubernetes.io/projected/298783da-21dc-4586-bfd0-b157b74ce8f7-kube-api-access-4lvq6\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.401783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/298783da-21dc-4586-bfd0-b157b74ce8f7-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402049 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-config\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-client-ca\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-serving-cert\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402266 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-service-ca-bundle\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402052 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-trusted-ca-bundle\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402307 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-audit\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.402352 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06ab8887-8ada-45d6-a104-ab5732219eeb-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403030 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-client-ca\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403066 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-config\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403171 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-encryption-config\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403197 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvl8m\" (UniqueName: \"kubernetes.io/projected/fca97aac-84bd-4f0b-93b0-f7a3f641076b-kube-api-access-fvl8m\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403224 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403306 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-dir\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403346 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403364 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm92x\" (UniqueName: \"kubernetes.io/projected/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-kube-api-access-mm92x\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403407 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/837a51ee-4dbf-439d-9249-d49cd09a0585-audit-dir\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.403426 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-oauth-serving-cert\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403608 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-audit\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.403637 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-etcd-client\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.404170 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnnpg\" (UniqueName: \"kubernetes.io/projected/50b0307c-e145-43ae-b97a-207ff99980a5-kube-api-access-tnnpg\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.404203 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/58ae95c6-76f6-4d84-b306-7ab053006feb-trusted-ca\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.404735 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406046 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9051d787-06db-42b2-846a-231f40dc737c-serving-cert\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/837a51ee-4dbf-439d-9249-d49cd09a0585-audit-dir\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406640 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-machine-approver-tls\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406707 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406744 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvl9n\" (UniqueName: \"kubernetes.io/projected/58ae95c6-76f6-4d84-b306-7ab053006feb-kube-api-access-bvl9n\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406814 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-serving-cert\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406852 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c658ccdf-b838-446f-857b-3acd10099f88-audit-dir\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406877 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.407634 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/298783da-21dc-4586-bfd0-b157b74ce8f7-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.408002 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-etcd-serving-ca\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.422637 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.406670 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/837a51ee-4dbf-439d-9249-d49cd09a0585-image-import-ca\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.428484 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.428986 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-etcd-client\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429110 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rscdf\" (UniqueName: \"kubernetes.io/projected/bfa540e4-eea8-4206-b872-5e42d80e017c-kube-api-access-rscdf\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429166 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-config\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429194 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-serving-cert\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429211 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-service-ca\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-encryption-config\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429264 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429283 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-client\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-client-ca\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429370 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-config\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429406 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-etcd-client\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429428 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-oauth-config\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429447 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429473 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/06ab8887-8ada-45d6-a104-ab5732219eeb-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429503 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp5jb\" (UniqueName: \"kubernetes.io/projected/7181d6c4-803a-4a92-869c-0f7a69724cb1-kube-api-access-qp5jb\") pod \"cluster-samples-operator-665b6dd947-qxzxh\" (UID: \"7181d6c4-803a-4a92-869c-0f7a69724cb1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429539 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429564 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-config\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429590 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429610 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-service-ca\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88c4733c-f3d7-4718-a865-bd4b9b510fbe-serving-cert\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429673 4884 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-auth-proxy-config\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.429695 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.430309 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b074b05e-c5dd-4818-8808-b6207aff3514-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.431050 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.431710 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-config\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.431711 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.431735 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-serving-cert\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.431798 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.432110 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pztqc"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.432245 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.432462 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.432689 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-auth-proxy-config\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.433779 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.434923 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.435379 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.435636 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.435961 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.436528 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.436857 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.437427 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.437597 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.437781 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/837a51ee-4dbf-439d-9249-d49cd09a0585-encryption-config\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.438339 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.439366 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.439740 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.440738 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pfrvj"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.441864 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.443756 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.444307 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.446243 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.449161 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.454623 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.457041 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.458073 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-vcmc8"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.459079 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-qzhng"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.460128 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4449t"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.461224 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ls7t4"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.463681 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.465134 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.465211 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.466446 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.467570 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-tqjn2"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.468835 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-gxgjt"] Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.470066 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z9r54"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.471506 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.472998 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.474638 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.476426 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.477812 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9frzk"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.479161 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.482149 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-whh5g"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.483174 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.484404 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.485119 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.485764 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-wc8hv"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.486520 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.486945 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.487897 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-pnpw5"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.488889 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-mllmk"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.490156 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.490949 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pztqc"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.492745 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-mllmk"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.494732 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2m5q7"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.496054 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.503263 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.504456 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.504640 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.505718 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.506740 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pfrvj"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.507762 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.508772 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-27rjh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.509394 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.510302 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-27rjh"] Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.525390 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-client-ca\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530837 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530863 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-client\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530907 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/06ab8887-8ada-45d6-a104-ab5732219eeb-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530932 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-etcd-client\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530956 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-oauth-config\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530978 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-qp5jb\" (UniqueName: \"kubernetes.io/projected/7181d6c4-803a-4a92-869c-0f7a69724cb1-kube-api-access-qp5jb\") pod \"cluster-samples-operator-665b6dd947-qxzxh\" (UID: \"7181d6c4-803a-4a92-869c-0f7a69724cb1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.530998 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531021 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-config\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531045 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531076 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531114 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-service-ca\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531137 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88c4733c-f3d7-4718-a865-bd4b9b510fbe-serving-cert\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ae95c6-76f6-4d84-b306-7ab053006feb-serving-cert\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531209 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnmgz\" (UniqueName: 
\"kubernetes.io/projected/88c4733c-f3d7-4718-a865-bd4b9b510fbe-kube-api-access-wnmgz\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531237 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-stats-auth\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531259 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-metrics-certs\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531288 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t775\" (UniqueName: \"kubernetes.io/projected/a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5-kube-api-access-7t775\") pod \"downloads-7954f5f757-2m5q7\" (UID: \"a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5\") " pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-serving-cert\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531337 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7181d6c4-803a-4a92-869c-0f7a69724cb1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qxzxh\" (UID: \"7181d6c4-803a-4a92-869c-0f7a69724cb1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531361 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531385 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531409 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm56v\" (UniqueName: \"kubernetes.io/projected/06ab8887-8ada-45d6-a104-ab5732219eeb-kube-api-access-cm56v\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531434 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/88c4733c-f3d7-4718-a865-bd4b9b510fbe-available-featuregates\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531461 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ae95c6-76f6-4d84-b306-7ab053006feb-config\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531484 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-policies\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531506 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-audit-policies\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531529 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhd4j\" (UniqueName: \"kubernetes.io/projected/c658ccdf-b838-446f-857b-3acd10099f88-kube-api-access-hhd4j\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531554 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-trusted-ca-bundle\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531580 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531633 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-config\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" 
(UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531704 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-ca\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531736 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzf9w\" (UniqueName: \"kubernetes.io/projected/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-kube-api-access-vzf9w\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531773 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-config\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531796 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/06ab8887-8ada-45d6-a104-ab5732219eeb-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531820 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-service-ca-bundle\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531854 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50b0307c-e145-43ae-b97a-207ff99980a5-serving-cert\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-error\") pod 
\"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531899 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-serving-cert\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531920 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-service-ca-bundle\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.531980 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-encryption-config\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532008 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvl8m\" (UniqueName: \"kubernetes.io/projected/fca97aac-84bd-4f0b-93b0-f7a3f641076b-kube-api-access-fvl8m\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532138 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-config\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532248 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532473 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532532 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06ab8887-8ada-45d6-a104-ab5732219eeb-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.532561 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-dir\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532597 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532634 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-default-certificate\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532666 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-oauth-serving-cert\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532733 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm92x\" (UniqueName: \"kubernetes.io/projected/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-kube-api-access-mm92x\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532766 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbfxt\" (UniqueName: \"kubernetes.io/projected/aa29b5a0-3a7e-41e2-92c2-544ddda5505d-kube-api-access-dbfxt\") pod \"migrator-59844c95c7-262rm\" (UID: \"aa29b5a0-3a7e-41e2-92c2-544ddda5505d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532799 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-policies\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532802 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-tnnpg\" (UniqueName: \"kubernetes.io/projected/50b0307c-e145-43ae-b97a-207ff99980a5-kube-api-access-tnnpg\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532858 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/58ae95c6-76f6-4d84-b306-7ab053006feb-trusted-ca\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532885 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpgv4\" (UniqueName: \"kubernetes.io/projected/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-kube-api-access-vpgv4\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532904 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c658ccdf-b838-446f-857b-3acd10099f88-audit-dir\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532921 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532927 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532943 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvl9n\" (UniqueName: \"kubernetes.io/projected/58ae95c6-76f6-4d84-b306-7ab053006feb-kube-api-access-bvl9n\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532964 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-serving-cert\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532981 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rscdf\" (UniqueName: \"kubernetes.io/projected/bfa540e4-eea8-4206-b872-5e42d80e017c-kube-api-access-rscdf\") pod 
\"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.532998 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-config\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533014 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-serving-cert\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533017 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-service-ca\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533035 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-service-ca\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533231 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-ca\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533431 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-config\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533568 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-service-ca-bundle\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533804 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-audit-policies\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.533858 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-service-ca\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.534141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/88c4733c-f3d7-4718-a865-bd4b9b510fbe-available-featuregates\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.534374 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-client-ca\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.534543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-oauth-serving-cert\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.535204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.535586 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88c4733c-f3d7-4718-a865-bd4b9b510fbe-serving-cert\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.535652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-config\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.535711 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-etcd-client\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.535928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/06ab8887-8ada-45d6-a104-ab5732219eeb-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 
15:21:32.536057 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c658ccdf-b838-446f-857b-3acd10099f88-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.536191 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58ae95c6-76f6-4d84-b306-7ab053006feb-config\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.536217 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c658ccdf-b838-446f-857b-3acd10099f88-audit-dir\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.536377 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-dir\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.536421 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.536512 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/58ae95c6-76f6-4d84-b306-7ab053006feb-trusted-ca\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.536740 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-config\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.537242 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-oauth-config\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.537395 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" 
Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.537690 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-trusted-ca-bundle\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.537890 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50b0307c-e145-43ae-b97a-207ff99980a5-serving-cert\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.538266 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-serving-cert\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.538334 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58ae95c6-76f6-4d84-b306-7ab053006feb-serving-cert\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.538575 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/06ab8887-8ada-45d6-a104-ab5732219eeb-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.538789 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-encryption-config\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539061 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-serving-cert\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539319 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539353 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539715 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-etcd-client\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539784 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539825 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.539984 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.540832 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-serving-cert\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.543759 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.544521 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/7181d6c4-803a-4a92-869c-0f7a69724cb1-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-qxzxh\" (UID: 
\"7181d6c4-803a-4a92-869c-0f7a69724cb1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.545335 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.546356 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c658ccdf-b838-446f-857b-3acd10099f88-serving-cert\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.564547 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.584863 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.604125 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.626604 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.634448 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpgv4\" (UniqueName: \"kubernetes.io/projected/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-kube-api-access-vpgv4\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.634535 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-stats-auth\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.634553 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-metrics-certs\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.634621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-service-ca-bundle\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.634659 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-default-certificate\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:32 crc 
kubenswrapper[4884]: I1128 15:21:32.634682 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbfxt\" (UniqueName: \"kubernetes.io/projected/aa29b5a0-3a7e-41e2-92c2-544ddda5505d-kube-api-access-dbfxt\") pod \"migrator-59844c95c7-262rm\" (UID: \"aa29b5a0-3a7e-41e2-92c2-544ddda5505d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.645189 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.663969 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.685038 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.687382 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.687554 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.687666 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.687842 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.705615 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.724499 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.744913 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.764020 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.784973 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.804445 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.825042 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.844936 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.865459 4884 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.885733 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.904794 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.925027 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.944835 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.965587 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 15:21:32 crc kubenswrapper[4884]: I1128 15:21:32.984882 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.000594 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-metrics-certs\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.004852 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.024685 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.029861 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-default-certificate\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.045750 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.060578 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-stats-auth\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.065871 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.076877 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-service-ca-bundle\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:33 crc kubenswrapper[4884]: 
I1128 15:21:33.085375 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.104607 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.126745 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.145507 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.176904 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.185328 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.205609 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.225208 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.246589 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.266064 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.286596 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.305262 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.326746 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.345511 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.366525 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.386074 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.403306 4884 request.go:700] Waited for 1.004899266s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa/token Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.429376 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl9fs\" 
(UniqueName: \"kubernetes.io/projected/837a51ee-4dbf-439d-9249-d49cd09a0585-kube-api-access-gl9fs\") pod \"apiserver-76f77b778f-66tg2\" (UID: \"837a51ee-4dbf-439d-9249-d49cd09a0585\") " pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.452702 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wncj2\" (UniqueName: \"kubernetes.io/projected/9051d787-06db-42b2-846a-231f40dc737c-kube-api-access-wncj2\") pod \"controller-manager-879f6c89f-2k4vg\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.473292 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45gjx\" (UniqueName: \"kubernetes.io/projected/94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d-kube-api-access-45gjx\") pod \"machine-approver-56656f9798-c7d2l\" (UID: \"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.490560 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drtzv\" (UniqueName: \"kubernetes.io/projected/b074b05e-c5dd-4818-8808-b6207aff3514-kube-api-access-drtzv\") pod \"machine-api-operator-5694c8668f-6tbrh\" (UID: \"b074b05e-c5dd-4818-8808-b6207aff3514\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.506443 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.507434 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lvq6\" (UniqueName: \"kubernetes.io/projected/298783da-21dc-4586-bfd0-b157b74ce8f7-kube-api-access-4lvq6\") pod \"openshift-apiserver-operator-796bbdcf4f-7c6tz\" (UID: \"298783da-21dc-4586-bfd0-b157b74ce8f7\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.508751 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.526246 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.527226 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.545828 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.566649 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.580462 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.585426 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.600194 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.606755 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.627736 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.632952 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" Nov 28 15:21:33 crc kubenswrapper[4884]: W1128 15:21:33.638139 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94b4a7af_0fb7_42d0_8ff5_80ef2a1ae02d.slice/crio-b5fbc8173f28a27f012cf9e1b076807505495c46ab50bc4878236967112dcb12 WatchSource:0}: Error finding container b5fbc8173f28a27f012cf9e1b076807505495c46ab50bc4878236967112dcb12: Status 404 returned error can't find the container with id b5fbc8173f28a27f012cf9e1b076807505495c46ab50bc4878236967112dcb12 Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.645075 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.665741 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.684988 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.704966 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.727836 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.746732 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.765529 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.786400 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.796753 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2k4vg"] Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.805035 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" 
Nov 28 15:21:33 crc kubenswrapper[4884]: W1128 15:21:33.808957 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9051d787_06db_42b2_846a_231f40dc737c.slice/crio-e199d95b5850977db58020875424756736d9c7b7c923d2726f1b10ad0f2790a3 WatchSource:0}: Error finding container e199d95b5850977db58020875424756736d9c7b7c923d2726f1b10ad0f2790a3: Status 404 returned error can't find the container with id e199d95b5850977db58020875424756736d9c7b7c923d2726f1b10ad0f2790a3 Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.823698 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-6tbrh"] Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.825689 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: W1128 15:21:33.829784 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb074b05e_c5dd_4818_8808_b6207aff3514.slice/crio-790e9c685f0db9f58059549feeb92c484d94ae37687ee66c5236791785d9be5c WatchSource:0}: Error finding container 790e9c685f0db9f58059549feeb92c484d94ae37687ee66c5236791785d9be5c: Status 404 returned error can't find the container with id 790e9c685f0db9f58059549feeb92c484d94ae37687ee66c5236791785d9be5c Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.844627 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.862964 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz"] Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.864765 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.884263 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: W1128 15:21:33.892120 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod298783da_21dc_4586_bfd0_b157b74ce8f7.slice/crio-147c219450e029433f87877b9f426dd204d92315d6166f3afffa4df7577cd4c4 WatchSource:0}: Error finding container 147c219450e029433f87877b9f426dd204d92315d6166f3afffa4df7577cd4c4: Status 404 returned error can't find the container with id 147c219450e029433f87877b9f426dd204d92315d6166f3afffa4df7577cd4c4 Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.911711 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.924712 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.944166 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 15:21:33 crc kubenswrapper[4884]: I1128 15:21:33.984802 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 
15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.002043 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-66tg2"] Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.005115 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.024702 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.059394 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.065292 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.085315 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.105644 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.125028 4884 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.145226 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.165705 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.206223 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.226338 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.246470 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.265384 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.285774 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.305332 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.325084 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.345559 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.364999 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.385462 
4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.403553 4884 request.go:700] Waited for 1.871708444s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/serviceaccounts/openshift-config-operator/token Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.410384 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" event={"ID":"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d","Type":"ContainerStarted","Data":"b5fbc8173f28a27f012cf9e1b076807505495c46ab50bc4878236967112dcb12"} Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.411434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" event={"ID":"298783da-21dc-4586-bfd0-b157b74ce8f7","Type":"ContainerStarted","Data":"147c219450e029433f87877b9f426dd204d92315d6166f3afffa4df7577cd4c4"} Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.412388 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" event={"ID":"b074b05e-c5dd-4818-8808-b6207aff3514","Type":"ContainerStarted","Data":"790e9c685f0db9f58059549feeb92c484d94ae37687ee66c5236791785d9be5c"} Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.413670 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" event={"ID":"9051d787-06db-42b2-846a-231f40dc737c","Type":"ContainerStarted","Data":"e199d95b5850977db58020875424756736d9c7b7c923d2726f1b10ad0f2790a3"} Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.424437 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnmgz\" (UniqueName: \"kubernetes.io/projected/88c4733c-f3d7-4718-a865-bd4b9b510fbe-kube-api-access-wnmgz\") pod \"openshift-config-operator-7777fb866f-qzhng\" (UID: \"88c4733c-f3d7-4718-a865-bd4b9b510fbe\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:34 crc kubenswrapper[4884]: W1128 15:21:34.438049 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod837a51ee_4dbf_439d_9249_d49cd09a0585.slice/crio-8e40c6b8107290518e3bdffd7e99e2321081e2b5c31a886ab529cd43fe88826d WatchSource:0}: Error finding container 8e40c6b8107290518e3bdffd7e99e2321081e2b5c31a886ab529cd43fe88826d: Status 404 returned error can't find the container with id 8e40c6b8107290518e3bdffd7e99e2321081e2b5c31a886ab529cd43fe88826d Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.450144 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp5jb\" (UniqueName: \"kubernetes.io/projected/7181d6c4-803a-4a92-869c-0f7a69724cb1-kube-api-access-qp5jb\") pod \"cluster-samples-operator-665b6dd947-qxzxh\" (UID: \"7181d6c4-803a-4a92-869c-0f7a69724cb1\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.473834 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm56v\" (UniqueName: \"kubernetes.io/projected/06ab8887-8ada-45d6-a104-ab5732219eeb-kube-api-access-cm56v\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: 
\"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.487393 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t775\" (UniqueName: \"kubernetes.io/projected/a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5-kube-api-access-7t775\") pod \"downloads-7954f5f757-2m5q7\" (UID: \"a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5\") " pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.506908 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzf9w\" (UniqueName: \"kubernetes.io/projected/999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b-kube-api-access-vzf9w\") pod \"etcd-operator-b45778765-gxgjt\" (UID: \"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b\") " pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.525939 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnnpg\" (UniqueName: \"kubernetes.io/projected/50b0307c-e145-43ae-b97a-207ff99980a5-kube-api-access-tnnpg\") pod \"route-controller-manager-6576b87f9c-vs6bp\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.540591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhd4j\" (UniqueName: \"kubernetes.io/projected/c658ccdf-b838-446f-857b-3acd10099f88-kube-api-access-hhd4j\") pod \"apiserver-7bbb656c7d-vrs25\" (UID: \"c658ccdf-b838-446f-857b-3acd10099f88\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.547640 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.568037 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvl8m\" (UniqueName: \"kubernetes.io/projected/fca97aac-84bd-4f0b-93b0-f7a3f641076b-kube-api-access-fvl8m\") pod \"console-f9d7485db-tqjn2\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.583867 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rscdf\" (UniqueName: \"kubernetes.io/projected/bfa540e4-eea8-4206-b872-5e42d80e017c-kube-api-access-rscdf\") pod \"oauth-openshift-558db77b4-4449t\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.590707 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.599062 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.602899 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/06ab8887-8ada-45d6-a104-ab5732219eeb-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t7bnd\" (UID: \"06ab8887-8ada-45d6-a104-ab5732219eeb\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.614265 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.615269 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.618448 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm92x\" (UniqueName: \"kubernetes.io/projected/fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13-kube-api-access-mm92x\") pod \"authentication-operator-69f744f599-pnpw5\" (UID: \"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.623900 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.629248 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.644155 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvl9n\" (UniqueName: \"kubernetes.io/projected/58ae95c6-76f6-4d84-b306-7ab053006feb-kube-api-access-bvl9n\") pod \"console-operator-58897d9998-z9r54\" (UID: \"58ae95c6-76f6-4d84-b306-7ab053006feb\") " pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.664638 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpgv4\" (UniqueName: \"kubernetes.io/projected/5b49f112-ca99-4bb7-9aaf-3aff77fbedaf-kube-api-access-vpgv4\") pod \"router-default-5444994796-rhppm\" (UID: \"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf\") " pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.666855 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.686029 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.687797 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbfxt\" (UniqueName: \"kubernetes.io/projected/aa29b5a0-3a7e-41e2-92c2-544ddda5505d-kube-api-access-dbfxt\") pod \"migrator-59844c95c7-262rm\" (UID: \"aa29b5a0-3a7e-41e2-92c2-544ddda5505d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.710577 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.724662 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.735333 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.746878 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-qzhng"] Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.749741 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.752423 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.765708 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.784877 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 15:21:34 crc kubenswrapper[4884]: W1128 15:21:34.797933 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88c4733c_f3d7_4718_a865_bd4b9b510fbe.slice/crio-52cba02de73fb40412fb4d76077d53d801dc4a5c042a5fd0a97acf9ad0e89339 WatchSource:0}: Error finding container 52cba02de73fb40412fb4d76077d53d801dc4a5c042a5fd0a97acf9ad0e89339: Status 404 returned error can't find the container with id 52cba02de73fb40412fb4d76077d53d801dc4a5c042a5fd0a97acf9ad0e89339 Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.864992 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866258 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5f95e883-844c-43e5-b2a4-1828e38634c3-srv-cert\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866755 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-bound-sa-token\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866798 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d5f065cb-816a-4305-8c57-171ddb1ffad6-signing-key\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866846 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5f95e883-844c-43e5-b2a4-1828e38634c3-profile-collector-cert\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866870 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87ksd\" (UniqueName: \"kubernetes.io/projected/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-kube-api-access-87ksd\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866905 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fdc11347-1937-480e-9498-a57c90434b82-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866943 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwgdj\" (UniqueName: \"kubernetes.io/projected/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-kube-api-access-fwgdj\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866975 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-registration-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " 
pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.866996 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hjvz\" (UniqueName: \"kubernetes.io/projected/d5f065cb-816a-4305-8c57-171ddb1ffad6-kube-api-access-7hjvz\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867016 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/afe08e68-6cb9-4a66-926d-e1361a7249ec-metrics-tls\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867039 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/27fe3a18-ed42-4d55-b718-528dab100bac-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867154 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/67917625-49ac-4f8f-b9d9-d07699acad03-proxy-tls\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867218 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e48ffa6-db35-47f0-bc85-70128ced240c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867267 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-tls\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-secret-volume\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867347 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc42ab1b-612c-4c42-a6ab-ff39c1908565-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867367 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1b82c382-0692-4ad4-8764-46cf8d97681a-tmpfs\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867389 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1b82c382-0692-4ad4-8764-46cf8d97681a-webhook-cert\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867408 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d5f065cb-816a-4305-8c57-171ddb1ffad6-signing-cabundle\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867429 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27fe3a18-ed42-4d55-b718-528dab100bac-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867481 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggptj\" (UniqueName: \"kubernetes.io/projected/afe08e68-6cb9-4a66-926d-e1361a7249ec-kube-api-access-ggptj\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867533 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jltt\" (UniqueName: \"kubernetes.io/projected/fdc11347-1937-480e-9498-a57c90434b82-kube-api-access-5jltt\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.867816 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9frzk\" (UID: 
\"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871028 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/afe08e68-6cb9-4a66-926d-e1361a7249ec-bound-sa-token\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871159 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3c26ad40-bf40-4023-92f2-e8753c7659e5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-whh5g\" (UID: \"3c26ad40-bf40-4023-92f2-e8753c7659e5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871209 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bnkt\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-kube-api-access-9bnkt\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871237 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/afe08e68-6cb9-4a66-926d-e1361a7249ec-trusted-ca\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871283 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncgpk\" (UniqueName: \"kubernetes.io/projected/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-kube-api-access-ncgpk\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871305 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1b82c382-0692-4ad4-8764-46cf8d97681a-apiservice-cert\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: 
\"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871350 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7dck\" (UniqueName: \"kubernetes.io/projected/3c26ad40-bf40-4023-92f2-e8753c7659e5-kube-api-access-c7dck\") pod \"multus-admission-controller-857f4d67dd-whh5g\" (UID: \"3c26ad40-bf40-4023-92f2-e8753c7659e5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871373 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b786443-445f-4761-a641-5015eb9f89e2-metrics-tls\") pod \"dns-operator-744455d44c-vcmc8\" (UID: \"3b786443-445f-4761-a641-5015eb9f89e2\") " pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871396 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871420 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/74ebbc5f-8432-43be-afdc-5aebcfd1dbf1-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-45lsh\" (UID: \"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871457 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871479 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.871511 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e48ffa6-db35-47f0-bc85-70128ced240c-proxy-tls\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.873603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-config\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.873644 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnws6\" (UniqueName: \"kubernetes.io/projected/3b786443-445f-4761-a641-5015eb9f89e2-kube-api-access-gnws6\") pod \"dns-operator-744455d44c-vcmc8\" (UID: \"3b786443-445f-4761-a641-5015eb9f89e2\") " pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.873667 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks6dw\" (UniqueName: \"kubernetes.io/projected/5f95e883-844c-43e5-b2a4-1828e38634c3-kube-api-access-ks6dw\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.873690 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.872796 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.873740 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/67917625-49ac-4f8f-b9d9-d07699acad03-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.874446 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfkkw\" (UniqueName: \"kubernetes.io/projected/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-kube-api-access-pfkkw\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.874511 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.874535 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27fe3a18-ed42-4d55-b718-528dab100bac-config\") pod 
\"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.875054 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0e48ffa6-db35-47f0-bc85-70128ced240c-images\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.875126 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-socket-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.875150 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv4xf\" (UniqueName: \"kubernetes.io/projected/669c7bb0-9847-45c5-a864-e1e30beb5c7d-kube-api-access-sv4xf\") pod \"package-server-manager-789f6589d5-db7mb\" (UID: \"669c7bb0-9847-45c5-a864-e1e30beb5c7d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3866e53c-b4ea-4a15-92f2-27f049e0304e-srv-cert\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876168 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-certificates\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876194 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fdc11347-1937-480e-9498-a57c90434b82-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: E1128 15:21:34.876274 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:35.376201814 +0000 UTC m=+134.938985725 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfxkr\" (UniqueName: \"kubernetes.io/projected/11aebc28-b264-4e1e-bf43-d5644a24b2ca-kube-api-access-cfxkr\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876348 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9rgc\" (UniqueName: \"kubernetes.io/projected/3866e53c-b4ea-4a15-92f2-27f049e0304e-kube-api-access-z9rgc\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876447 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-config\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876488 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-mountpoint-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876533 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc42ab1b-612c-4c42-a6ab-ff39c1908565-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876555 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64cd7\" (UniqueName: \"kubernetes.io/projected/74ebbc5f-8432-43be-afdc-5aebcfd1dbf1-kube-api-access-64cd7\") pod \"control-plane-machine-set-operator-78cbb6b69f-45lsh\" (UID: \"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqw67\" (UniqueName: \"kubernetes.io/projected/0e48ffa6-db35-47f0-bc85-70128ced240c-kube-api-access-cqw67\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876623 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-plugins-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876641 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-serving-cert\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/669c7bb0-9847-45c5-a864-e1e30beb5c7d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-db7mb\" (UID: \"669c7bb0-9847-45c5-a864-e1e30beb5c7d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876719 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qkf2\" (UniqueName: \"kubernetes.io/projected/1b82c382-0692-4ad4-8764-46cf8d97681a-kube-api-access-7qkf2\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.876739 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-config-volume\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.877273 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-csi-data-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.877311 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-trusted-ca\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.877334 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bppks\" (UniqueName: \"kubernetes.io/projected/67917625-49ac-4f8f-b9d9-d07699acad03-kube-api-access-bppks\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.877357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3866e53c-b4ea-4a15-92f2-27f049e0304e-profile-collector-cert\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.877411 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.883381 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.977933 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978194 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5f95e883-844c-43e5-b2a4-1828e38634c3-srv-cert\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-bound-sa-token\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978248 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d5f065cb-816a-4305-8c57-171ddb1ffad6-signing-key\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978262 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/5f95e883-844c-43e5-b2a4-1828e38634c3-profile-collector-cert\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978290 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87ksd\" (UniqueName: \"kubernetes.io/projected/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-kube-api-access-87ksd\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978308 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fdc11347-1937-480e-9498-a57c90434b82-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978324 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwgdj\" (UniqueName: \"kubernetes.io/projected/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-kube-api-access-fwgdj\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978343 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb6x9\" (UniqueName: \"kubernetes.io/projected/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-kube-api-access-pb6x9\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978382 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frs5n\" (UniqueName: \"kubernetes.io/projected/8f0f2356-d186-4942-8803-59247f45ced2-kube-api-access-frs5n\") pod \"ingress-canary-27rjh\" (UID: \"8f0f2356-d186-4942-8803-59247f45ced2\") " pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-registration-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978413 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hjvz\" (UniqueName: \"kubernetes.io/projected/d5f065cb-816a-4305-8c57-171ddb1ffad6-kube-api-access-7hjvz\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978428 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/afe08e68-6cb9-4a66-926d-e1361a7249ec-metrics-tls\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: 
\"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978443 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/27fe3a18-ed42-4d55-b718-528dab100bac-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978460 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f0f2356-d186-4942-8803-59247f45ced2-cert\") pod \"ingress-canary-27rjh\" (UID: \"8f0f2356-d186-4942-8803-59247f45ced2\") " pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978478 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/67917625-49ac-4f8f-b9d9-d07699acad03-proxy-tls\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978493 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c125ef33-e37f-4905-9c0b-16477e86e9d3-config-volume\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978510 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e48ffa6-db35-47f0-bc85-70128ced240c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978533 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-tls\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978548 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-secret-volume\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978570 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc42ab1b-612c-4c42-a6ab-ff39c1908565-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978587 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1b82c382-0692-4ad4-8764-46cf8d97681a-tmpfs\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978603 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1b82c382-0692-4ad4-8764-46cf8d97681a-webhook-cert\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978618 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d5f065cb-816a-4305-8c57-171ddb1ffad6-signing-cabundle\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978635 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27fe3a18-ed42-4d55-b718-528dab100bac-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978664 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggptj\" (UniqueName: \"kubernetes.io/projected/afe08e68-6cb9-4a66-926d-e1361a7249ec-kube-api-access-ggptj\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/afe08e68-6cb9-4a66-926d-e1361a7249ec-bound-sa-token\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978694 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jltt\" (UniqueName: \"kubernetes.io/projected/fdc11347-1937-480e-9498-a57c90434b82-kube-api-access-5jltt\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978709 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978724 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978771 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3c26ad40-bf40-4023-92f2-e8753c7659e5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-whh5g\" (UID: \"3c26ad40-bf40-4023-92f2-e8753c7659e5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978786 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bnkt\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-kube-api-access-9bnkt\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978803 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/afe08e68-6cb9-4a66-926d-e1361a7249ec-trusted-ca\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978818 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncgpk\" (UniqueName: \"kubernetes.io/projected/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-kube-api-access-ncgpk\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978834 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978857 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1b82c382-0692-4ad4-8764-46cf8d97681a-apiservice-cert\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978873 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7dck\" (UniqueName: \"kubernetes.io/projected/3c26ad40-bf40-4023-92f2-e8753c7659e5-kube-api-access-c7dck\") pod \"multus-admission-controller-857f4d67dd-whh5g\" (UID: \"3c26ad40-bf40-4023-92f2-e8753c7659e5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978889 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c125ef33-e37f-4905-9c0b-16477e86e9d3-metrics-tls\") pod \"dns-default-mllmk\" (UID: 
\"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978906 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b786443-445f-4761-a641-5015eb9f89e2-metrics-tls\") pod \"dns-operator-744455d44c-vcmc8\" (UID: \"3b786443-445f-4761-a641-5015eb9f89e2\") " pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978923 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978940 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/74ebbc5f-8432-43be-afdc-5aebcfd1dbf1-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-45lsh\" (UID: \"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978957 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-certs\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978975 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.978990 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979012 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e48ffa6-db35-47f0-bc85-70128ced240c-proxy-tls\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979055 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-config\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnws6\" (UniqueName: \"kubernetes.io/projected/3b786443-445f-4761-a641-5015eb9f89e2-kube-api-access-gnws6\") pod \"dns-operator-744455d44c-vcmc8\" (UID: \"3b786443-445f-4761-a641-5015eb9f89e2\") " pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979113 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks6dw\" (UniqueName: \"kubernetes.io/projected/5f95e883-844c-43e5-b2a4-1828e38634c3-kube-api-access-ks6dw\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979130 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979156 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/67917625-49ac-4f8f-b9d9-d07699acad03-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979172 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfkkw\" (UniqueName: \"kubernetes.io/projected/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-kube-api-access-pfkkw\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979201 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979226 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27fe3a18-ed42-4d55-b718-528dab100bac-config\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979242 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6spl4\" (UniqueName: \"kubernetes.io/projected/c125ef33-e37f-4905-9c0b-16477e86e9d3-kube-api-access-6spl4\") pod \"dns-default-mllmk\" (UID: 
\"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979285 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0e48ffa6-db35-47f0-bc85-70128ced240c-images\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979366 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-socket-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979419 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv4xf\" (UniqueName: \"kubernetes.io/projected/669c7bb0-9847-45c5-a864-e1e30beb5c7d-kube-api-access-sv4xf\") pod \"package-server-manager-789f6589d5-db7mb\" (UID: \"669c7bb0-9847-45c5-a864-e1e30beb5c7d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3866e53c-b4ea-4a15-92f2-27f049e0304e-srv-cert\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979542 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9rgc\" (UniqueName: \"kubernetes.io/projected/3866e53c-b4ea-4a15-92f2-27f049e0304e-kube-api-access-z9rgc\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979604 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-certificates\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979633 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fdc11347-1937-480e-9498-a57c90434b82-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979655 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfxkr\" (UniqueName: \"kubernetes.io/projected/11aebc28-b264-4e1e-bf43-d5644a24b2ca-kube-api-access-cfxkr\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 
15:21:34.979700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-config\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979739 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-mountpoint-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979766 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc42ab1b-612c-4c42-a6ab-ff39c1908565-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979793 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64cd7\" (UniqueName: \"kubernetes.io/projected/74ebbc5f-8432-43be-afdc-5aebcfd1dbf1-kube-api-access-64cd7\") pod \"control-plane-machine-set-operator-78cbb6b69f-45lsh\" (UID: \"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979818 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/669c7bb0-9847-45c5-a864-e1e30beb5c7d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-db7mb\" (UID: \"669c7bb0-9847-45c5-a864-e1e30beb5c7d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979848 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0e48ffa6-db35-47f0-bc85-70128ced240c-images\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979856 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqw67\" (UniqueName: \"kubernetes.io/projected/0e48ffa6-db35-47f0-bc85-70128ced240c-kube-api-access-cqw67\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979884 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-plugins-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979904 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-serving-cert\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979924 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qkf2\" (UniqueName: \"kubernetes.io/projected/1b82c382-0692-4ad4-8764-46cf8d97681a-kube-api-access-7qkf2\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979942 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-config-volume\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979959 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-csi-data-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979975 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-trusted-ca\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.979991 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bppks\" (UniqueName: \"kubernetes.io/projected/67917625-49ac-4f8f-b9d9-d07699acad03-kube-api-access-bppks\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.980010 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3866e53c-b4ea-4a15-92f2-27f049e0304e-profile-collector-cert\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.980030 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-node-bootstrap-token\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:34 crc kubenswrapper[4884]: E1128 15:21:34.980545 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 15:21:35.480524778 +0000 UTC m=+135.043308579 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.980630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-registration-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.980965 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1b82c382-0692-4ad4-8764-46cf8d97681a-tmpfs\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.981242 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-mountpoint-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.982623 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/afe08e68-6cb9-4a66-926d-e1361a7249ec-trusted-ca\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.984066 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-certificates\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.984117 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.985461 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fdc11347-1937-480e-9498-a57c90434b82-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.986401 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/afe08e68-6cb9-4a66-926d-e1361a7249ec-metrics-tls\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.986492 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1b82c382-0692-4ad4-8764-46cf8d97681a-apiservice-cert\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.987433 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.987520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc42ab1b-612c-4c42-a6ab-ff39c1908565-installation-pull-secrets\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.988502 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/67917625-49ac-4f8f-b9d9-d07699acad03-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.989643 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.991138 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d5f065cb-816a-4305-8c57-171ddb1ffad6-signing-key\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.991497 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/3c26ad40-bf40-4023-92f2-e8753c7659e5-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-whh5g\" (UID: \"3c26ad40-bf40-4023-92f2-e8753c7659e5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.992546 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5f95e883-844c-43e5-b2a4-1828e38634c3-srv-cert\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.993498 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/3866e53c-b4ea-4a15-92f2-27f049e0304e-srv-cert\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.994264 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-csi-data-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.994291 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.994513 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-plugins-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.994928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-config\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.994967 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27fe3a18-ed42-4d55-b718-528dab100bac-config\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.995405 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1b82c382-0692-4ad4-8764-46cf8d97681a-webhook-cert\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.995739 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-trusted-ca\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.995881 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/74ebbc5f-8432-43be-afdc-5aebcfd1dbf1-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-45lsh\" (UID: \"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.996176 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e48ffa6-db35-47f0-bc85-70128ced240c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.996842 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5f95e883-844c-43e5-b2a4-1828e38634c3-profile-collector-cert\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.997084 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3b786443-445f-4761-a641-5015eb9f89e2-metrics-tls\") pod \"dns-operator-744455d44c-vcmc8\" (UID: \"3b786443-445f-4761-a641-5015eb9f89e2\") " pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.997389 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-config-volume\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.997520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc42ab1b-612c-4c42-a6ab-ff39c1908565-ca-trust-extracted\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.997522 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-config\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.998590 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27fe3a18-ed42-4d55-b718-528dab100bac-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.998612 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-secret-volume\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.999045 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.999827 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-tls\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:34 crc kubenswrapper[4884]: I1128 15:21:34.999891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-socket-dir\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.001652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d5f065cb-816a-4305-8c57-171ddb1ffad6-signing-cabundle\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.002348 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.002758 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.002944 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/669c7bb0-9847-45c5-a864-e1e30beb5c7d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-db7mb\" (UID: \"669c7bb0-9847-45c5-a864-e1e30beb5c7d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.003432 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/67917625-49ac-4f8f-b9d9-d07699acad03-proxy-tls\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.004337 4884 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0e48ffa6-db35-47f0-bc85-70128ced240c-proxy-tls\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.005366 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/3866e53c-b4ea-4a15-92f2-27f049e0304e-profile-collector-cert\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.005431 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fdc11347-1937-480e-9498-a57c90434b82-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.006520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-serving-cert\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.029958 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqw67\" (UniqueName: \"kubernetes.io/projected/0e48ffa6-db35-47f0-bc85-70128ced240c-kube-api-access-cqw67\") pod \"machine-config-operator-74547568cd-h65ns\" (UID: \"0e48ffa6-db35-47f0-bc85-70128ced240c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.046743 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.054191 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/afe08e68-6cb9-4a66-926d-e1361a7249ec-bound-sa-token\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.080463 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfxkr\" (UniqueName: \"kubernetes.io/projected/11aebc28-b264-4e1e-bf43-d5644a24b2ca-kube-api-access-cfxkr\") pod \"marketplace-operator-79b997595-9frzk\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081220 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6spl4\" (UniqueName: \"kubernetes.io/projected/c125ef33-e37f-4905-9c0b-16477e86e9d3-kube-api-access-6spl4\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081363 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-node-bootstrap-token\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081411 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb6x9\" (UniqueName: \"kubernetes.io/projected/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-kube-api-access-pb6x9\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081435 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frs5n\" (UniqueName: \"kubernetes.io/projected/8f0f2356-d186-4942-8803-59247f45ced2-kube-api-access-frs5n\") pod \"ingress-canary-27rjh\" (UID: \"8f0f2356-d186-4942-8803-59247f45ced2\") " pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f0f2356-d186-4942-8803-59247f45ced2-cert\") pod \"ingress-canary-27rjh\" (UID: \"8f0f2356-d186-4942-8803-59247f45ced2\") " pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c125ef33-e37f-4905-9c0b-16477e86e9d3-config-volume\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081548 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081682 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c125ef33-e37f-4905-9c0b-16477e86e9d3-metrics-tls\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.081717 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-certs\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.083165 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:35.583147468 +0000 UTC m=+135.145931339 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.092839 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-certs\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.093555 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c125ef33-e37f-4905-9c0b-16477e86e9d3-config-volume\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.093685 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-node-bootstrap-token\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.094406 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8f0f2356-d186-4942-8803-59247f45ced2-cert\") pod \"ingress-canary-27rjh\" (UID: \"8f0f2356-d186-4942-8803-59247f45ced2\") " pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.094422 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/c125ef33-e37f-4905-9c0b-16477e86e9d3-metrics-tls\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.100810 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.107836 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv4xf\" (UniqueName: \"kubernetes.io/projected/669c7bb0-9847-45c5-a864-e1e30beb5c7d-kube-api-access-sv4xf\") pod \"package-server-manager-789f6589d5-db7mb\" (UID: \"669c7bb0-9847-45c5-a864-e1e30beb5c7d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.109517 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hjvz\" (UniqueName: \"kubernetes.io/projected/d5f065cb-816a-4305-8c57-171ddb1ffad6-kube-api-access-7hjvz\") pod \"service-ca-9c57cc56f-pztqc\" (UID: \"d5f065cb-816a-4305-8c57-171ddb1ffad6\") " pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.160541 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.161959 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnws6\" (UniqueName: \"kubernetes.io/projected/3b786443-445f-4761-a641-5015eb9f89e2-kube-api-access-gnws6\") pod \"dns-operator-744455d44c-vcmc8\" (UID: \"3b786443-445f-4761-a641-5015eb9f89e2\") " pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.162692 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/27fe3a18-ed42-4d55-b718-528dab100bac-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-qt8mw\" (UID: \"27fe3a18-ed42-4d55-b718-528dab100bac\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.171123 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bnkt\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-kube-api-access-9bnkt\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.184052 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.184693 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:35.684673057 +0000 UTC m=+135.247456858 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.245145 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87ksd\" (UniqueName: \"kubernetes.io/projected/c583b5ab-7db2-4bf8-a644-2abe08ab7e4e-kube-api-access-87ksd\") pod \"csi-hostpathplugin-pfrvj\" (UID: \"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e\") " pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.245204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jltt\" (UniqueName: \"kubernetes.io/projected/fdc11347-1937-480e-9498-a57c90434b82-kube-api-access-5jltt\") pod \"openshift-controller-manager-operator-756b6f6bc6-7hcwg\" (UID: \"fdc11347-1937-480e-9498-a57c90434b82\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.258947 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9rgc\" (UniqueName: \"kubernetes.io/projected/3866e53c-b4ea-4a15-92f2-27f049e0304e-kube-api-access-z9rgc\") pod \"catalog-operator-68c6474976-74vl8\" (UID: \"3866e53c-b4ea-4a15-92f2-27f049e0304e\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.259786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7dck\" (UniqueName: \"kubernetes.io/projected/3c26ad40-bf40-4023-92f2-e8753c7659e5-kube-api-access-c7dck\") pod \"multus-admission-controller-857f4d67dd-whh5g\" (UID: \"3c26ad40-bf40-4023-92f2-e8753c7659e5\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.286178 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.286535 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:35.786521895 +0000 UTC m=+135.349305696 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.296854 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-bound-sa-token\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.297803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncgpk\" (UniqueName: \"kubernetes.io/projected/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-kube-api-access-ncgpk\") pod \"collect-profiles-29405715-lm54v\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.301328 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.306466 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.341551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks6dw\" (UniqueName: \"kubernetes.io/projected/5f95e883-844c-43e5-b2a4-1828e38634c3-kube-api-access-ks6dw\") pod \"olm-operator-6b444d44fb-wjmmn\" (UID: \"5f95e883-844c-43e5-b2a4-1828e38634c3\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.342434 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.356285 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8883bc8-819f-4441-b7ba-df49cdd9fdc8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-ds8wc\" (UID: \"d8883bc8-819f-4441-b7ba-df49cdd9fdc8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.358980 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.365872 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.372261 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0df6d13d-89fc-45b6-803f-eb82dedd5e2e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-n4pmk\" (UID: \"0df6d13d-89fc-45b6-803f-eb82dedd5e2e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.372520 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.381602 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.387506 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64cd7\" (UniqueName: \"kubernetes.io/projected/74ebbc5f-8432-43be-afdc-5aebcfd1dbf1-kube-api-access-64cd7\") pod \"control-plane-machine-set-operator-78cbb6b69f-45lsh\" (UID: \"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.387856 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.388812 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.389404 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:35.88938583 +0000 UTC m=+135.452169631 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.395381 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.402566 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.407218 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfkkw\" (UniqueName: \"kubernetes.io/projected/89ecf747-c0c0-4ce8-b2f0-37942e6cad18-kube-api-access-pfkkw\") pod \"kube-storage-version-migrator-operator-b67b599dd-tcp99\" (UID: \"89ecf747-c0c0-4ce8-b2f0-37942e6cad18\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.416877 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.446706 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.451626 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.454288 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggptj\" (UniqueName: \"kubernetes.io/projected/afe08e68-6cb9-4a66-926d-e1361a7249ec-kube-api-access-ggptj\") pod \"ingress-operator-5b745b69d9-blfxf\" (UID: \"afe08e68-6cb9-4a66-926d-e1361a7249ec\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.457699 4884 generic.go:334] "Generic (PLEG): container finished" podID="88c4733c-f3d7-4718-a865-bd4b9b510fbe" containerID="22e597a044c815d37fbfb2c987cb9a95d79edb11ca62739a7160c1f33f38dcb7" exitCode=0 Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.458345 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" event={"ID":"88c4733c-f3d7-4718-a865-bd4b9b510fbe","Type":"ContainerDied","Data":"22e597a044c815d37fbfb2c987cb9a95d79edb11ca62739a7160c1f33f38dcb7"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.458426 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" event={"ID":"88c4733c-f3d7-4718-a865-bd4b9b510fbe","Type":"ContainerStarted","Data":"52cba02de73fb40412fb4d76077d53d801dc4a5c042a5fd0a97acf9ad0e89339"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.464211 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" event={"ID":"298783da-21dc-4586-bfd0-b157b74ce8f7","Type":"ContainerStarted","Data":"580755fd381fdd0ed8c938612d7b04f8e7cd3c5acafde66f97a295a00b65af03"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.487076 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwgdj\" (UniqueName: \"kubernetes.io/projected/fe6c657d-d2f1-4920-9cc5-2f982f3179cd-kube-api-access-fwgdj\") pod \"service-ca-operator-777779d784-qt7xx\" (UID: \"fe6c657d-d2f1-4920-9cc5-2f982f3179cd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.491265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.491481 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:35.991471165 +0000 UTC m=+135.554254956 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.501835 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bppks\" (UniqueName: \"kubernetes.io/projected/67917625-49ac-4f8f-b9d9-d07699acad03-kube-api-access-bppks\") pod \"machine-config-controller-84d6567774-vjntz\" (UID: \"67917625-49ac-4f8f-b9d9-d07699acad03\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.516256 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qkf2\" (UniqueName: \"kubernetes.io/projected/1b82c382-0692-4ad4-8764-46cf8d97681a-kube-api-access-7qkf2\") pod \"packageserver-d55dfcdfc-tt5ps\" (UID: \"1b82c382-0692-4ad4-8764-46cf8d97681a\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.533427 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4449t"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.540060 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" event={"ID":"b074b05e-c5dd-4818-8808-b6207aff3514","Type":"ContainerStarted","Data":"773d1620ac544b2ef19138b16f77630ec106f5cf1023b5e813cde87e2a7b9cdc"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.540132 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" event={"ID":"b074b05e-c5dd-4818-8808-b6207aff3514","Type":"ContainerStarted","Data":"f74083bfd8ac59dbfc08815159a70d1c5c16aee72f2007cec91a5ba6708f15ab"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.552762 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6spl4\" (UniqueName: \"kubernetes.io/projected/c125ef33-e37f-4905-9c0b-16477e86e9d3-kube-api-access-6spl4\") pod \"dns-default-mllmk\" (UID: \"c125ef33-e37f-4905-9c0b-16477e86e9d3\") " pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.555711 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frs5n\" (UniqueName: \"kubernetes.io/projected/8f0f2356-d186-4942-8803-59247f45ced2-kube-api-access-frs5n\") pod \"ingress-canary-27rjh\" (UID: \"8f0f2356-d186-4942-8803-59247f45ced2\") " 
pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.559858 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb6x9\" (UniqueName: \"kubernetes.io/projected/b3b8d8d8-b236-4698-80ec-2ba508d0eef8-kube-api-access-pb6x9\") pod \"machine-config-server-wc8hv\" (UID: \"b3b8d8d8-b236-4698-80ec-2ba508d0eef8\") " pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.582368 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.598234 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.598736 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.601921 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.101901171 +0000 UTC m=+135.664684972 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.611396 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" event={"ID":"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d","Type":"ContainerStarted","Data":"8d838a92e11e546fa08eeef61af21b92e8d8012fe6849e84594bf10814e7ad65"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.611461 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" event={"ID":"94b4a7af-0fb7-42d0-8ff5-80ef2a1ae02d","Type":"ContainerStarted","Data":"68c31823161184e4cc0a1b066ea6b86df2304d7f67e061a487f6eba4844ac71f"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.632718 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.642143 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-pnpw5"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.644414 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-gxgjt"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.645820 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.650400 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" event={"ID":"7181d6c4-803a-4a92-869c-0f7a69724cb1","Type":"ContainerStarted","Data":"7123fa50644bd57227bdf791f624148df19c1347a403225942112280eda37e6a"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.698656 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" event={"ID":"837a51ee-4dbf-439d-9249-d49cd09a0585","Type":"ContainerDied","Data":"a8fd3c3e299cad6765868e0fc7efc4381a3ab3e35527fb83fc5b09a077cf57bc"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.698854 4884 generic.go:334] "Generic (PLEG): container finished" podID="837a51ee-4dbf-439d-9249-d49cd09a0585" containerID="a8fd3c3e299cad6765868e0fc7efc4381a3ab3e35527fb83fc5b09a077cf57bc" exitCode=0 Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.698973 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" event={"ID":"837a51ee-4dbf-439d-9249-d49cd09a0585","Type":"ContainerStarted","Data":"8e40c6b8107290518e3bdffd7e99e2321081e2b5c31a886ab529cd43fe88826d"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.699674 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.700702 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.200690349 +0000 UTC m=+135.763474150 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.709867 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.710282 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" event={"ID":"9051d787-06db-42b2-846a-231f40dc737c","Type":"ContainerStarted","Data":"87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.711201 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.712477 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-rhppm" event={"ID":"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf","Type":"ContainerStarted","Data":"e09149f7edbf0d18769a17c684adf4b899aa24b4472691773645091b88a8f938"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.712513 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-rhppm" event={"ID":"5b49f112-ca99-4bb7-9aaf-3aff77fbedaf","Type":"ContainerStarted","Data":"3535d391effb363c3d9e2ad2adfe8f2a459a60f67007afb7dd371306a3a1c001"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.713582 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" event={"ID":"c658ccdf-b838-446f-857b-3acd10099f88","Type":"ContainerStarted","Data":"2cbee876eb9529443fae0f48dc41348f4c55c9e070998f282b1eb3261186ddd8"} Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.714309 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2m5q7"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.716319 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-tqjn2"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.725537 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.725857 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.731815 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.736958 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.741653 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.756780 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-wc8hv" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.770862 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.782754 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-27rjh" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.802008 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.804020 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.304000596 +0000 UTC m=+135.866784397 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.848845 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z9r54"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.904404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:35 crc kubenswrapper[4884]: E1128 15:21:35.904730 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.404715653 +0000 UTC m=+135.967499454 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.924139 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd"] Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.931507 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:35 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:35 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:35 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.931555 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:35 crc kubenswrapper[4884]: I1128 15:21:35.942664 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-rhppm" podStartSLOduration=116.942639493 podStartE2EDuration="1m56.942639493s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:35.939622072 +0000 UTC m=+135.502405883" watchObservedRunningTime="2025-11-28 15:21:35.942639493 +0000 UTC m=+135.505423294" Nov 28 15:21:35 crc kubenswrapper[4884]: W1128 15:21:35.981380 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5e68c28_e4f3_4e6b_b80d_0dbd49afdcb5.slice/crio-1fbd60dd3c5437c64b2df0e6043c654dc05e3ee61e6c4133df424f86f4323913 WatchSource:0}: Error finding container 1fbd60dd3c5437c64b2df0e6043c654dc05e3ee61e6c4133df424f86f4323913: Status 404 returned error can't find the container with id 1fbd60dd3c5437c64b2df0e6043c654dc05e3ee61e6c4133df424f86f4323913 Nov 28 15:21:36 crc kubenswrapper[4884]: W1128 15:21:36.001295 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50b0307c_e145_43ae_b97a_207ff99980a5.slice/crio-9f6fb0155df5d61d7e403f0564d4b9bc73c32c725d7dbf8616e0a5f5f1b027dc WatchSource:0}: Error finding container 9f6fb0155df5d61d7e403f0564d4b9bc73c32c725d7dbf8616e0a5f5f1b027dc: Status 404 returned error can't find the container with id 9f6fb0155df5d61d7e403f0564d4b9bc73c32c725d7dbf8616e0a5f5f1b027dc Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.006241 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 
15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.006627 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.506612293 +0000 UTC m=+136.069396094 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.108068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.108791 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.60877646 +0000 UTC m=+136.171560271 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.185378 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" podStartSLOduration=118.185359177 podStartE2EDuration="1m58.185359177s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:36.185279405 +0000 UTC m=+135.748063206" watchObservedRunningTime="2025-11-28 15:21:36.185359177 +0000 UTC m=+135.748142978" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.186565 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-c7d2l" podStartSLOduration=118.186559329 podStartE2EDuration="1m58.186559329s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:36.138966533 +0000 UTC m=+135.701750334" watchObservedRunningTime="2025-11-28 15:21:36.186559329 +0000 UTC m=+135.749343130" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.209870 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.210330 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.71031186 +0000 UTC m=+136.273095661 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.311964 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.312348 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.812332553 +0000 UTC m=+136.375116354 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.412627 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.413529 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:36.913493043 +0000 UTC m=+136.476276854 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.513781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.514179 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.01416509 +0000 UTC m=+136.576948901 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.614715 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.615452 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.115436793 +0000 UTC m=+136.678220594 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.717063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.717572 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.217559478 +0000 UTC m=+136.780343279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.744068 4884 generic.go:334] "Generic (PLEG): container finished" podID="c658ccdf-b838-446f-857b-3acd10099f88" containerID="82ad69a1d31d7dea992426dffa14d6deacd0f70e531caeb2830cbb18718b5704" exitCode=0 Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.750775 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-2m5q7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.751021 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2m5q7" podUID="a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.769772 4884 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vs6bp container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.769853 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" podUID="50b0307c-e145-43ae-b97a-207ff99980a5" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.820583 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.824031 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.324012919 +0000 UTC m=+136.886796720 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828456 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828488 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828498 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828509 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns"] Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828531 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" event={"ID":"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13","Type":"ContainerStarted","Data":"72db4f8f00e062ca7cae7c91ea523d3095433789d71aedcd0576d7368f414fc0"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" event={"ID":"fb4d8a1a-9de2-4a21-bcf3-ed3c51104d13","Type":"ContainerStarted","Data":"e954d4d753889eea33f394b6e0e79cfe41a0b2ae03211a0a2aa2779ca91bd39e"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828557 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" event={"ID":"c658ccdf-b838-446f-857b-3acd10099f88","Type":"ContainerDied","Data":"82ad69a1d31d7dea992426dffa14d6deacd0f70e531caeb2830cbb18718b5704"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2m5q7" event={"ID":"a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5","Type":"ContainerStarted","Data":"5c068628ee233ab9623f5aed4af967d83512aa551aa5d179f40194ebac720889"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828580 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2m5q7" 
event={"ID":"a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5","Type":"ContainerStarted","Data":"1fbd60dd3c5437c64b2df0e6043c654dc05e3ee61e6c4133df424f86f4323913"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828589 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" event={"ID":"50b0307c-e145-43ae-b97a-207ff99980a5","Type":"ContainerStarted","Data":"4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" event={"ID":"50b0307c-e145-43ae-b97a-207ff99980a5","Type":"ContainerStarted","Data":"9f6fb0155df5d61d7e403f0564d4b9bc73c32c725d7dbf8616e0a5f5f1b027dc"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828608 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" event={"ID":"06ab8887-8ada-45d6-a104-ab5732219eeb","Type":"ContainerStarted","Data":"5807dbe1444e2be6de4f4512c5d2066f1010bd99b0dac3dd57266377a68b032d"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828617 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" event={"ID":"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b","Type":"ContainerStarted","Data":"b81c446c6d7fb1c5cdff42e2d47b878417e7633f774bbc6caf9b3eb47f591320"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828625 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" event={"ID":"999842f5-f4a9-4ccb-a9e5-a086dfaa5a7b","Type":"ContainerStarted","Data":"ab6d7d3c2fa8f4be0dd661a994145b3909b577b50a4be68d88db27193f37dc88"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" event={"ID":"7181d6c4-803a-4a92-869c-0f7a69724cb1","Type":"ContainerStarted","Data":"ca1cf4c227dbcd02c20ad38f2c8a66a89917f40ada70d16c385978a66a29ca68"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828641 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" event={"ID":"7181d6c4-803a-4a92-869c-0f7a69724cb1","Type":"ContainerStarted","Data":"e699f15748d7f0d2d7b078610d27674ab9e1d7127683e18e92a5129effa5468d"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828649 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" event={"ID":"88c4733c-f3d7-4718-a865-bd4b9b510fbe","Type":"ContainerStarted","Data":"3a7e40d148f1b271f5cd0a83e197980c80e3a1c3c488757391d7f1eccb69408e"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.828658 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z9r54" event={"ID":"58ae95c6-76f6-4d84-b306-7ab053006feb","Type":"ContainerStarted","Data":"db6a8d7791ca1acc68f5dc5c909230efd8bd3d76931bdb77e9dfcd6c16d14e62"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.829484 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqjn2" 
event={"ID":"fca97aac-84bd-4f0b-93b0-f7a3f641076b","Type":"ContainerStarted","Data":"6befeb37e618bb0773ce4b2a7a5e696613ef60023894c9341ddcbd2ec28bd02d"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.834424 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wc8hv" event={"ID":"b3b8d8d8-b236-4698-80ec-2ba508d0eef8","Type":"ContainerStarted","Data":"f9880882ffe6527053a8cc6ccc834b90de443eee313978838824653057eb189b"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.840256 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" event={"ID":"bfa540e4-eea8-4206-b872-5e42d80e017c","Type":"ContainerStarted","Data":"83b3d69f435aceb1a78c29e5ca951cc929b4bb8c10ef6675aa820ccd81afc6e7"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.840447 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" event={"ID":"bfa540e4-eea8-4206-b872-5e42d80e017c","Type":"ContainerStarted","Data":"ff74b7c8b958bafa023520630001eb735be0089e3c1b01a4c250bfc4ecfe5c1e"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.840979 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.842069 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" event={"ID":"aa29b5a0-3a7e-41e2-92c2-544ddda5505d","Type":"ContainerStarted","Data":"485ee40d4387763540ee28add74484feb1314c772a4328d991e67300d22e0bd5"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.853441 4884 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4449t container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" start-of-body= Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.853497 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" podUID="bfa540e4-eea8-4206-b872-5e42d80e017c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.862366 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" event={"ID":"837a51ee-4dbf-439d-9249-d49cd09a0585","Type":"ContainerStarted","Data":"d0f7787c0adada56e99712dcafa03ac9b4c7d3771ff7f9f1bdb859961c84225f"} Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.923928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:36 crc kubenswrapper[4884]: E1128 15:21:36.924854 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 15:21:37.42482966 +0000 UTC m=+136.987613571 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:36 crc kubenswrapper[4884]: I1128 15:21:36.994656 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-6tbrh" podStartSLOduration=117.994637006 podStartE2EDuration="1m57.994637006s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:36.993150417 +0000 UTC m=+136.555934238" watchObservedRunningTime="2025-11-28 15:21:36.994637006 +0000 UTC m=+136.557420807" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.024841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.025264 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.525248901 +0000 UTC m=+137.088032702 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.028081 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7c6tz" podStartSLOduration=119.028069065 podStartE2EDuration="1m59.028069065s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.02635476 +0000 UTC m=+136.589138571" watchObservedRunningTime="2025-11-28 15:21:37.028069065 +0000 UTC m=+136.590852866" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.052409 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:37 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:37 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:37 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.052466 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.130265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.130777 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.630760556 +0000 UTC m=+137.193544357 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.233007 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.233429 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.733414035 +0000 UTC m=+137.296197836 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.236684 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-2m5q7" podStartSLOduration=119.236668092 podStartE2EDuration="1m59.236668092s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.235929872 +0000 UTC m=+136.798713673" watchObservedRunningTime="2025-11-28 15:21:37.236668092 +0000 UTC m=+136.799451893" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.273260 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" podStartSLOduration=119.273236085 podStartE2EDuration="1m59.273236085s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.268380735 +0000 UTC m=+136.831164536" watchObservedRunningTime="2025-11-28 15:21:37.273236085 +0000 UTC m=+136.836019886" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.334277 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.334612 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: 
nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.834600077 +0000 UTC m=+137.397383878 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.394300 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" podStartSLOduration=119.394279543 podStartE2EDuration="1m59.394279543s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.354017183 +0000 UTC m=+136.916800984" watchObservedRunningTime="2025-11-28 15:21:37.394279543 +0000 UTC m=+136.957063354" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.394788 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-qxzxh" podStartSLOduration=119.394783207 podStartE2EDuration="1m59.394783207s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.393989096 +0000 UTC m=+136.956772897" watchObservedRunningTime="2025-11-28 15:21:37.394783207 +0000 UTC m=+136.957567028" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.425685 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" podStartSLOduration=118.425668068 podStartE2EDuration="1m58.425668068s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.420672815 +0000 UTC m=+136.983456606" watchObservedRunningTime="2025-11-28 15:21:37.425668068 +0000 UTC m=+136.988451869" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.437643 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.438244 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.938220312 +0000 UTC m=+137.501004113 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.438356 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.438766 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:37.938749686 +0000 UTC m=+137.501533487 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.454134 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9frzk"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.508106 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-gxgjt" podStartSLOduration=118.5080701 podStartE2EDuration="1m58.5080701s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.465285591 +0000 UTC m=+137.028069392" watchObservedRunningTime="2025-11-28 15:21:37.5080701 +0000 UTC m=+137.070853901" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.508219 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-pnpw5" podStartSLOduration=119.508214993 podStartE2EDuration="1m59.508214993s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:37.507629527 +0000 UTC m=+137.070413328" watchObservedRunningTime="2025-11-28 15:21:37.508214993 +0000 UTC m=+137.070998794" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.539731 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.540275 
4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.040260256 +0000 UTC m=+137.603044057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.548764 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.570189 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-vcmc8"] Nov 28 15:21:37 crc kubenswrapper[4884]: W1128 15:21:37.582857 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b786443_445f_4761_a641_5015eb9f89e2.slice/crio-5a3755747a449f7e62cb76c6bf8baf85b27b646ac63addf3d613f22beeffa4f6 WatchSource:0}: Error finding container 5a3755747a449f7e62cb76c6bf8baf85b27b646ac63addf3d613f22beeffa4f6: Status 404 returned error can't find the container with id 5a3755747a449f7e62cb76c6bf8baf85b27b646ac63addf3d613f22beeffa4f6 Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.613826 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.619518 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.642218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.642564 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.142553566 +0000 UTC m=+137.705337357 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.739718 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:37 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:37 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:37 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.739975 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.743878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.744208 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.244192238 +0000 UTC m=+137.806976039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.846691 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.846983 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.346972111 +0000 UTC m=+137.909755912 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.894242 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pfrvj"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.895884 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.936232 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.944117 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.944947 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99"] Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.949779 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:37 crc kubenswrapper[4884]: E1128 15:21:37.951038 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.451021708 +0000 UTC m=+138.013805499 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:37 crc kubenswrapper[4884]: I1128 15:21:37.982928 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" event={"ID":"27fe3a18-ed42-4d55-b718-528dab100bac","Type":"ContainerStarted","Data":"b94857ca9c149314ebc47b33818319be58e74e6394a83ee0c2d1981f021db1c2"} Nov 28 15:21:37 crc kubenswrapper[4884]: W1128 15:21:37.987116 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f95e883_844c_43e5_b2a4_1828e38634c3.slice/crio-91a5efb88f5376a13da800eaaca5c9650d88ac82e7ae827a9f56191422fd4e61 WatchSource:0}: Error finding container 91a5efb88f5376a13da800eaaca5c9650d88ac82e7ae827a9f56191422fd4e61: Status 404 returned error can't find the container with id 91a5efb88f5376a13da800eaaca5c9650d88ac82e7ae827a9f56191422fd4e61 Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.014652 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.014898 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqjn2" event={"ID":"fca97aac-84bd-4f0b-93b0-f7a3f641076b","Type":"ContainerStarted","Data":"73da5938365cfacc272b1076e251360798b528f2d9347c60d331e90c980511c5"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.025818 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-whh5g"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.032023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" event={"ID":"3b786443-445f-4761-a641-5015eb9f89e2","Type":"ContainerStarted","Data":"5a3755747a449f7e62cb76c6bf8baf85b27b646ac63addf3d613f22beeffa4f6"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.033023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" event={"ID":"fdc11347-1937-480e-9498-a57c90434b82","Type":"ContainerStarted","Data":"7be43fd7ff6c5c11b6b141359533de140cbaa8d62f732af9ab3343cc23806839"} Nov 28 15:21:38 crc kubenswrapper[4884]: W1128 15:21:38.034202 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b82c382_0692_4ad4_8764_46cf8d97681a.slice/crio-bc42827c37b3509d53455d4a2327657a302b6f118b6d342f0d7d73e44671d105 WatchSource:0}: Error finding container bc42827c37b3509d53455d4a2327657a302b6f118b6d342f0d7d73e44671d105: Status 404 returned error can't find the container with id bc42827c37b3509d53455d4a2327657a302b6f118b6d342f0d7d73e44671d105 Nov 28 15:21:38 crc kubenswrapper[4884]: W1128 15:21:38.035310 4884 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89ecf747_c0c0_4ce8_b2f0_37942e6cad18.slice/crio-34df1d570f85288c422886e448624f60de4b969bd2f32a1c3460bf0d004c0b26 WatchSource:0}: Error finding container 34df1d570f85288c422886e448624f60de4b969bd2f32a1c3460bf0d004c0b26: Status 404 returned error can't find the container with id 34df1d570f85288c422886e448624f60de4b969bd2f32a1c3460bf0d004c0b26 Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.035551 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" event={"ID":"aa29b5a0-3a7e-41e2-92c2-544ddda5505d","Type":"ContainerStarted","Data":"a7b3b00af28b2cfaa86207b1370be83bc438e8e40179a5b87d5af9a60c942d66"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.035576 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" event={"ID":"aa29b5a0-3a7e-41e2-92c2-544ddda5505d","Type":"ContainerStarted","Data":"84cfcfec1836698034ed17eb78bb79a6617398a7640b4b2e90b01830b472513c"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.042292 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" event={"ID":"11aebc28-b264-4e1e-bf43-d5644a24b2ca","Type":"ContainerStarted","Data":"514e6e98b772c23c77c07f2f359d6edc5c466870647d09c9e9edf5f48fa565c1"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.042327 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" event={"ID":"11aebc28-b264-4e1e-bf43-d5644a24b2ca","Type":"ContainerStarted","Data":"4c1b6f547bf2ed7fb9555852b0f54c1be604e9f7fa418ba543c12ed3daabaa7e"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.043345 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.046200 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-mllmk"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.046242 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" event={"ID":"669c7bb0-9847-45c5-a864-e1e30beb5c7d","Type":"ContainerStarted","Data":"0e7b7298939e89fc7fe57e8f8272e42e5caeacc9b5a3d7bf3c92b784df06949d"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.050156 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z9r54" event={"ID":"58ae95c6-76f6-4d84-b306-7ab053006feb","Type":"ContainerStarted","Data":"0174f936c034a7850096d3c11e933a198483dc1c1b7f8650ecd588fde1d34d42"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.050312 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-z9r54" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.052430 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.053135 4884 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz"] Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.053847 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.553835401 +0000 UTC m=+138.116619202 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.057595 4884 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9frzk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body= Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.057638 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.058268 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" event={"ID":"c658ccdf-b838-446f-857b-3acd10099f88","Type":"ContainerStarted","Data":"f5bf3eb6dbe75a6cd0eb35fa24ca0f3901525b536b839bb47f7d190fa27b6c66"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.065396 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-tqjn2" podStartSLOduration=120.065377339 podStartE2EDuration="2m0.065377339s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.060968911 +0000 UTC m=+137.623752712" watchObservedRunningTime="2025-11-28 15:21:38.065377339 +0000 UTC m=+137.628161140" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.068248 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" event={"ID":"06ab8887-8ada-45d6-a104-ab5732219eeb","Type":"ContainerStarted","Data":"8a44b566d56021f42d03ae81d049e0680a32a15e8ccfde9ac9672fd5486c3b4a"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.069783 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" event={"ID":"0e48ffa6-db35-47f0-bc85-70128ced240c","Type":"ContainerStarted","Data":"97bbad4a79560b4ceb5482d04bebbf9d0b74d408d3dc0a46619853135ee95141"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.069827 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" 
event={"ID":"0e48ffa6-db35-47f0-bc85-70128ced240c","Type":"ContainerStarted","Data":"db2ff7b35f4ce80a9f3d41559332303e6bd73cc9b391a93c586f0a02fbf8f70a"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.069839 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" event={"ID":"0e48ffa6-db35-47f0-bc85-70128ced240c","Type":"ContainerStarted","Data":"7fe10c2c389fd4503f42f90737630d14642511ad37989ae02b4906541fa802e2"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.071232 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-wc8hv" event={"ID":"b3b8d8d8-b236-4698-80ec-2ba508d0eef8","Type":"ContainerStarted","Data":"933a97950184b30377486d2b7c389ca626b043ba566775728dd923c9c0301da0"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.074461 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" event={"ID":"837a51ee-4dbf-439d-9249-d49cd09a0585","Type":"ContainerStarted","Data":"6a01be3661bce7119234faa12b3b9cb27aae3c1499f5ff3d2819182009be81e0"} Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.074986 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-2m5q7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.075046 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2m5q7" podUID="a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.081849 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.101895 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pztqc"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.105219 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.108858 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.108923 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.109850 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-262rm" podStartSLOduration=119.109839751 podStartE2EDuration="1m59.109839751s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.09853778 +0000 UTC m=+137.661321591" watchObservedRunningTime="2025-11-28 15:21:38.109839751 +0000 UTC m=+137.672623552" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.119500 4884 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" podStartSLOduration=119.119483137 podStartE2EDuration="1m59.119483137s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.117184727 +0000 UTC m=+137.679968528" watchObservedRunningTime="2025-11-28 15:21:38.119483137 +0000 UTC m=+137.682266938" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.127169 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.135530 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.135753 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" podStartSLOduration=119.13573797 podStartE2EDuration="1m59.13573797s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.131938488 +0000 UTC m=+137.694722289" watchObservedRunningTime="2025-11-28 15:21:38.13573797 +0000 UTC m=+137.698521771" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.143053 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-27rjh"] Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.145423 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-z9r54" podStartSLOduration=120.145407617 podStartE2EDuration="2m0.145407617s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.144509993 +0000 UTC m=+137.707293794" watchObservedRunningTime="2025-11-28 15:21:38.145407617 +0000 UTC m=+137.708191418" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.153796 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.155545 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.655531436 +0000 UTC m=+138.218315237 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.163496 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-wc8hv" podStartSLOduration=6.163482318 podStartE2EDuration="6.163482318s" podCreationTimestamp="2025-11-28 15:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.163157248 +0000 UTC m=+137.725941049" watchObservedRunningTime="2025-11-28 15:21:38.163482318 +0000 UTC m=+137.726266119" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.207979 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-h65ns" podStartSLOduration=119.20796487 podStartE2EDuration="1m59.20796487s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.205844064 +0000 UTC m=+137.768627865" watchObservedRunningTime="2025-11-28 15:21:38.20796487 +0000 UTC m=+137.770748671" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.228854 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" podStartSLOduration=120.228838895 podStartE2EDuration="2m0.228838895s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.227282074 +0000 UTC m=+137.790065875" watchObservedRunningTime="2025-11-28 15:21:38.228838895 +0000 UTC m=+137.791622696" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.264905 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.265283 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.765266954 +0000 UTC m=+138.328050755 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.366339 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.366584 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.866554768 +0000 UTC m=+138.429338579 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.367510 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.367818 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.867811541 +0000 UTC m=+138.430595342 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.468723 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.468933 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.968904209 +0000 UTC m=+138.531688010 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.469169 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.469536 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:38.969526075 +0000 UTC m=+138.532309926 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.509296 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.509352 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.572004 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.572138 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.072113654 +0000 UTC m=+138.634897455 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.572296 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.572777 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.072760181 +0000 UTC m=+138.635544002 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.673112 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.673306 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.173281184 +0000 UTC m=+138.736064985 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.673438 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.673719 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.173708575 +0000 UTC m=+138.736492376 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.739132 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:38 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:38 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:38 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.739559 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.774340 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.774620 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.274606048 +0000 UTC m=+138.837389849 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.877227 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.878294 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.378278815 +0000 UTC m=+138.941062616 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.942459 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.970403 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7bnd" podStartSLOduration=120.970388644 podStartE2EDuration="2m0.970388644s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:38.243638629 +0000 UTC m=+137.806422430" watchObservedRunningTime="2025-11-28 15:21:38.970388644 +0000 UTC m=+138.533172445" Nov 28 15:21:38 crc kubenswrapper[4884]: I1128 15:21:38.982423 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:38 crc kubenswrapper[4884]: E1128 15:21:38.982707 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.482692072 +0000 UTC m=+139.045475873 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.050474 4884 patch_prober.go:28] interesting pod/console-operator-58897d9998-z9r54 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.050540 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-z9r54" podUID="58ae95c6-76f6-4d84-b306-7ab053006feb" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.093267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.093619 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.593607791 +0000 UTC m=+139.156391592 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.176659 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" event={"ID":"afe08e68-6cb9-4a66-926d-e1361a7249ec","Type":"ContainerStarted","Data":"598e4814cc619ecd8e4810a92b142abab0b44466a1c8ddb4302861105680222d"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.180504 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" event={"ID":"5f95e883-844c-43e5-b2a4-1828e38634c3","Type":"ContainerStarted","Data":"91a5efb88f5376a13da800eaaca5c9650d88ac82e7ae827a9f56191422fd4e61"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.194840 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.195275 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.695256964 +0000 UTC m=+139.258040775 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.211407 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" event={"ID":"3c26ad40-bf40-4023-92f2-e8753c7659e5","Type":"ContainerStarted","Data":"f952ffee6168fc8e0a49afe77edcdf83996341604d8f1b499fe696c9d2bb7683"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.218002 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" event={"ID":"3b786443-445f-4761-a641-5015eb9f89e2","Type":"ContainerStarted","Data":"e21480a63667a3d9f60b17d1dfe66894f9256d3c581b03e2b0ea9e270aa162c1"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.220144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" event={"ID":"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e","Type":"ContainerStarted","Data":"52171911e85886f76948095011a85d74bc50f682602758a93192bcfb258ee581"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.223310 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" event={"ID":"bd32abac-bba6-4b9e-bf45-60afad5b0e9e","Type":"ContainerStarted","Data":"f72004a53add76ffa5810be395204e20e68bae9c5e2404b8fcb11027661db2b2"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.231163 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" event={"ID":"89ecf747-c0c0-4ce8-b2f0-37942e6cad18","Type":"ContainerStarted","Data":"34df1d570f85288c422886e448624f60de4b969bd2f32a1c3460bf0d004c0b26"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.240191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" event={"ID":"3866e53c-b4ea-4a15-92f2-27f049e0304e","Type":"ContainerStarted","Data":"1e185a447c0ae928b56094dd9aa06b8f68d402513b7c42b3789b702ae3dcbe87"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.240234 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" event={"ID":"3866e53c-b4ea-4a15-92f2-27f049e0304e","Type":"ContainerStarted","Data":"aa71c8acda7f30237e69f84b30a69736c6379827fddeaa38abccc4abaab6faf4"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.242182 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.248170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" event={"ID":"0df6d13d-89fc-45b6-803f-eb82dedd5e2e","Type":"ContainerStarted","Data":"a5b8faeb94a9e544787a47f8f45df776d040a52d7d78ad27982783be03ab985b"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.261221 4884 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-74vl8 
container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.261271 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" podUID="3866e53c-b4ea-4a15-92f2-27f049e0304e" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.266279 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" podStartSLOduration=120.266266762 podStartE2EDuration="2m0.266266762s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:39.265398179 +0000 UTC m=+138.828181980" watchObservedRunningTime="2025-11-28 15:21:39.266266762 +0000 UTC m=+138.829050563" Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.268417 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" event={"ID":"d5f065cb-816a-4305-8c57-171ddb1ffad6","Type":"ContainerStarted","Data":"eecb92feed955c9fa8f77c3d0e5f393ef64949234e8a410a9570c9d4e8023995"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.290417 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" event={"ID":"669c7bb0-9847-45c5-a864-e1e30beb5c7d","Type":"ContainerStarted","Data":"19715574ec90af1f1223f6a5de6eb642049b8e25d86042dc701201921f4b8d14"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.295987 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.296261 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.796250519 +0000 UTC m=+139.359034310 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.308712 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" event={"ID":"fe6c657d-d2f1-4920-9cc5-2f982f3179cd","Type":"ContainerStarted","Data":"e94a537c2c8b5d379d6df409c0d4579595d0d535f3e14e6632f2559c5259a5e6"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.343417 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-mllmk" event={"ID":"c125ef33-e37f-4905-9c0b-16477e86e9d3","Type":"ContainerStarted","Data":"f625efa214d81835b3caf208a80d1985b4c7e3d92a607dd2457441ffb2ee1df4"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.347881 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" event={"ID":"d8883bc8-819f-4441-b7ba-df49cdd9fdc8","Type":"ContainerStarted","Data":"f268a3c63c9637bf0bd82a5ff4c271c04981d56929fc321a58597923228d37b7"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.362937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" event={"ID":"fdc11347-1937-480e-9498-a57c90434b82","Type":"ContainerStarted","Data":"93ef1fe70198e56b6cb81070161f0fbcecf40ac29e70b26c7e681cdff8a792bc"} Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.402459 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.402999 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.902979477 +0000 UTC m=+139.465763278 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.403475 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.403821 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:39.90381014 +0000 UTC m=+139.466594001 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.464533 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-27rjh" event={"ID":"8f0f2356-d186-4942-8803-59247f45ced2","Type":"ContainerStarted","Data":"e0de3f84d000e89ad25c8d32d67b9deef6d46cda335e98c81aeda00e94527829"}
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.502190 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" event={"ID":"1b82c382-0692-4ad4-8764-46cf8d97681a","Type":"ContainerStarted","Data":"bc42827c37b3509d53455d4a2327657a302b6f118b6d342f0d7d73e44671d105"}
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.503134 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.505844 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-7hcwg" podStartSLOduration=121.505827772 podStartE2EDuration="2m1.505827772s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:39.397691956 +0000 UTC m=+138.960475777" watchObservedRunningTime="2025-11-28 15:21:39.505827772 +0000 UTC m=+139.068611573"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.509609 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.510969 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.010918067 +0000 UTC m=+139.573701878 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.528620 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" event={"ID":"27fe3a18-ed42-4d55-b718-528dab100bac","Type":"ContainerStarted","Data":"21d12087151ff2305bae18d9c78713758f7a352c586ed8ea55468a6c34dd8bda"}
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.543504 4884 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-tt5ps container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body=
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.543646 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" podUID="1b82c382-0692-4ad4-8764-46cf8d97681a" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.548211 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-27rjh" podStartSLOduration=7.548192789 podStartE2EDuration="7.548192789s" podCreationTimestamp="2025-11-28 15:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:39.50837234 +0000 UTC m=+139.071156141" watchObservedRunningTime="2025-11-28 15:21:39.548192789 +0000 UTC m=+139.110976590"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.549291 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" podStartSLOduration=120.549285717 podStartE2EDuration="2m0.549285717s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:39.542512548 +0000 UTC m=+139.105296369" watchObservedRunningTime="2025-11-28 15:21:39.549285717 +0000 UTC m=+139.112069518"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.569082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" event={"ID":"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1","Type":"ContainerStarted","Data":"1b25ff46a8d83f836dcb780e707f235686a50412316bd2aa5be7bbe9e8f0561a"}
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.571256 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-qt8mw" podStartSLOduration=120.571240541 podStartE2EDuration="2m0.571240541s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:39.569496515 +0000 UTC m=+139.132280316" watchObservedRunningTime="2025-11-28 15:21:39.571240541 +0000 UTC m=+139.134024342"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.592335 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" event={"ID":"67917625-49ac-4f8f-b9d9-d07699acad03","Type":"ContainerStarted","Data":"b3c82a642c920745b51828af3c0e33a19c8c3d1e9f0f9396f8c53f5d16e6b766"}
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.592387 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" event={"ID":"67917625-49ac-4f8f-b9d9-d07699acad03","Type":"ContainerStarted","Data":"0a19a3333ee642d27428949aec0c6b81473251ab33b5332a29eee2cff834547a"}
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.595240 4884 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-9frzk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused" start-of-body=
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.595300 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.27:8080/healthz\": dial tcp 10.217.0.27:8080: connect: connection refused"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.595327 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.595364 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.612454 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" podStartSLOduration=120.612431177 podStartE2EDuration="2m0.612431177s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:39.611191294 +0000 UTC m=+139.173975095" watchObservedRunningTime="2025-11-28 15:21:39.612431177 +0000 UTC m=+139.175214978"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.629304 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.629687 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.129672605 +0000 UTC m=+139.692456406 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.637330 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.641895 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-z9r54"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.729756 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.730804 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.230772024 +0000 UTC m=+139.793555845 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.743236 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 15:21:39 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]process-running ok
Nov 28 15:21:39 crc kubenswrapper[4884]: healthz check failed
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.743304 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.802008 4884 patch_prober.go:28] interesting pod/apiserver-76f77b778f-66tg2 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]log ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]etcd ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/max-in-flight-filter ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 28 15:21:39 crc kubenswrapper[4884]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 28 15:21:39 crc kubenswrapper[4884]: [-]poststarthook/project.openshift.io-projectcache failed: reason withheld
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [-]poststarthook/openshift.io-startinformers failed: reason withheld
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 28 15:21:39 crc kubenswrapper[4884]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 28 15:21:39 crc kubenswrapper[4884]: livez check failed
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.802054 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" podUID="837a51ee-4dbf-439d-9249-d49cd09a0585" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.834321 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.834627 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.334611565 +0000 UTC m=+139.897395376 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:39 crc kubenswrapper[4884]: I1128 15:21:39.937735 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:39 crc kubenswrapper[4884]: E1128 15:21:39.938461 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.438442926 +0000 UTC m=+140.001226737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.040436 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.040830 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.540814559 +0000 UTC m=+140.103598360 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.142229 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.142381 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.642361088 +0000 UTC m=+140.205144899 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.142870 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.143214 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.643202841 +0000 UTC m=+140.205986642 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.244395 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.244593 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.744564976 +0000 UTC m=+140.307348787 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.244668 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.245021 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.745011408 +0000 UTC m=+140.307795219 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.345928 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.346059 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.846042085 +0000 UTC m=+140.408825886 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.346140 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.346412 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.846404694 +0000 UTC m=+140.409188495 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.447750 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.447897 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.947875482 +0000 UTC m=+140.510659283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.448257 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.448572 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:40.948562481 +0000 UTC m=+140.511346292 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.549005 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.549379 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.049357611 +0000 UTC m=+140.612141412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.555190 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng"
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.625607 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" event={"ID":"89ecf747-c0c0-4ce8-b2f0-37942e6cad18","Type":"ContainerStarted","Data":"5aaf88f0345a93da9bcfdf968318433c98cb39062c469f02f8d45c2f9a47cc9d"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.635560 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" event={"ID":"fe6c657d-d2f1-4920-9cc5-2f982f3179cd","Type":"ContainerStarted","Data":"a926ce9fb1591d993609a0388fb21bab2126835c9b2d949b6a496d70fcfcc696"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.640183 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-mllmk" event={"ID":"c125ef33-e37f-4905-9c0b-16477e86e9d3","Type":"ContainerStarted","Data":"a545c719d43a6323c6068a0a391663a0b10949a21e7eced11ea3dd2e451fe42e"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.640214 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-mllmk" event={"ID":"c125ef33-e37f-4905-9c0b-16477e86e9d3","Type":"ContainerStarted","Data":"eaeb3fd67fb49446e1327ec48c9ebd4f49269e3338ef2d24f49ef0d6d19fdaf3"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.640690 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-mllmk"
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.649971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.651368 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.151355984 +0000 UTC m=+140.714139785 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.653641 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-tcp99" podStartSLOduration=121.653631454 podStartE2EDuration="2m1.653631454s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.648535719 +0000 UTC m=+140.211319510" watchObservedRunningTime="2025-11-28 15:21:40.653631454 +0000 UTC m=+140.216415255"
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.657148 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" event={"ID":"3c26ad40-bf40-4023-92f2-e8753c7659e5","Type":"ContainerStarted","Data":"bcac0ffbe5f5b7eb033b77a908cc43afec02b54ca21f66c97571f4f137ea5870"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.657185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" event={"ID":"3c26ad40-bf40-4023-92f2-e8753c7659e5","Type":"ContainerStarted","Data":"a17ead967cc1b497cc8130dc43455751d11c723fdcf0e18651ae91f552f99c28"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.660324 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-45lsh" event={"ID":"74ebbc5f-8432-43be-afdc-5aebcfd1dbf1","Type":"ContainerStarted","Data":"d6ab9cdfd66f4af99ee233b1c1485fe619706a43f059313a3ab8fb7c5cb6c9c9"}
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.668343 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-qt7xx" podStartSLOduration=121.668331155 podStartE2EDuration="2m1.668331155s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.667240496 +0000 UTC m=+140.230024307" watchObservedRunningTime="2025-11-28 15:21:40.668331155 +0000 UTC m=+140.231114956"
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.681868 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" event={"ID":"afe08e68-6cb9-4a66-926d-e1361a7249ec","Type":"ContainerStarted","Data":"4bc003021fed30f54ad89c8c403e97634a6108dc1650b5c482c50d03049409f2"}
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" event={"ID":"afe08e68-6cb9-4a66-926d-e1361a7249ec","Type":"ContainerStarted","Data":"4bc003021fed30f54ad89c8c403e97634a6108dc1650b5c482c50d03049409f2"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.681902 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" event={"ID":"afe08e68-6cb9-4a66-926d-e1361a7249ec","Type":"ContainerStarted","Data":"af914ad3e55c92b52a7fd57135976562caf2e5cfc505fe2dd39b3d2467bc196d"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.705835 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-mllmk" podStartSLOduration=8.705817491 podStartE2EDuration="8.705817491s" podCreationTimestamp="2025-11-28 15:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.689800796 +0000 UTC m=+140.252584617" watchObservedRunningTime="2025-11-28 15:21:40.705817491 +0000 UTC m=+140.268601292" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.708002 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-blfxf" podStartSLOduration=121.707994709 podStartE2EDuration="2m1.707994709s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.705504283 +0000 UTC m=+140.268288084" watchObservedRunningTime="2025-11-28 15:21:40.707994709 +0000 UTC m=+140.270778510" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.717799 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.717848 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" event={"ID":"5f95e883-844c-43e5-b2a4-1828e38634c3","Type":"ContainerStarted","Data":"acb6aa6f54636e73325adf92f879b4b3eb6a5a02523271fbd143babddc74557e"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.720125 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-whh5g" podStartSLOduration=121.720115322 podStartE2EDuration="2m1.720115322s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.719750922 +0000 UTC m=+140.282534733" watchObservedRunningTime="2025-11-28 15:21:40.720115322 +0000 UTC m=+140.282899123" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.727296 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.731435 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" event={"ID":"0df6d13d-89fc-45b6-803f-eb82dedd5e2e","Type":"ContainerStarted","Data":"f21a86e3fce1fd746bc5a5f2631dabba890f32bccaaf7cb95e5af505d2ab5688"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.741282 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm 
container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:40 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:40 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:40 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.741327 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.747038 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" event={"ID":"3b786443-445f-4761-a641-5015eb9f89e2","Type":"ContainerStarted","Data":"8999bbd606828cd115ef2bed32a8b688d46a7cf5c23e30b478a751ae119769fa"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.754895 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.755593 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.255570384 +0000 UTC m=+140.818354185 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.757029 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wjmmn" podStartSLOduration=121.757015213 podStartE2EDuration="2m1.757015213s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.754558267 +0000 UTC m=+140.317342068" watchObservedRunningTime="2025-11-28 15:21:40.757015213 +0000 UTC m=+140.319799014" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.763793 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" event={"ID":"bd32abac-bba6-4b9e-bf45-60afad5b0e9e","Type":"ContainerStarted","Data":"22a5a3e84ca91cb76bdf3e2031cab324b8aa7d47c5e5bd47d1f01ec1ac2aa42c"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.771485 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" event={"ID":"d5f065cb-816a-4305-8c57-171ddb1ffad6","Type":"ContainerStarted","Data":"de6396328a3e35b01704295f01ba750208b2a1b118874b9e2a9dbdd6c79389a1"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.817836 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" event={"ID":"1b82c382-0692-4ad4-8764-46cf8d97681a","Type":"ContainerStarted","Data":"7c3fa50746f44623751b632d58fbce9b0f2d6639d2303c20622bdeeaf54c04ce"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.820777 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-ds8wc" event={"ID":"d8883bc8-819f-4441-b7ba-df49cdd9fdc8","Type":"ContainerStarted","Data":"d5ff5a5f175d2d3a56a38c631736992090e2a6fbe61a7692faf286c110ff002d"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.836548 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-vcmc8" podStartSLOduration=121.836528007 podStartE2EDuration="2m1.836528007s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.80657699 +0000 UTC m=+140.369360801" watchObservedRunningTime="2025-11-28 15:21:40.836528007 +0000 UTC m=+140.399311808" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.867341 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.868024 4884 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" event={"ID":"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e","Type":"ContainerStarted","Data":"ba8a06deeae6a79c892bac353acf188fb14fc722929a6ed0c27a4d70237de40e"} Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.868727 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.368716433 +0000 UTC m=+140.931500234 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.870225 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tt5ps" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.870893 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-n4pmk" podStartSLOduration=121.87086873 podStartE2EDuration="2m1.87086873s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:40.863363661 +0000 UTC m=+140.426147462" watchObservedRunningTime="2025-11-28 15:21:40.87086873 +0000 UTC m=+140.433652531" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.889256 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" event={"ID":"669c7bb0-9847-45c5-a864-e1e30beb5c7d","Type":"ContainerStarted","Data":"99978bf0a5dc0d9e83060443dd1bc3b9fc7125f3d1e30a230bb194a9e086cafd"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.889850 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.904140 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" event={"ID":"67917625-49ac-4f8f-b9d9-d07699acad03","Type":"ContainerStarted","Data":"85890e630b99831989ab0c0c78fab215aba2ae1552b6691a3b226b224d0bc28c"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.922110 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-27rjh" event={"ID":"8f0f2356-d186-4942-8803-59247f45ced2","Type":"ContainerStarted","Data":"a9dfe6b6075184cac89ea8f3f81c2d16db398739eea4df8acd4d92fb678432de"} Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.940363 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-74vl8" Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.946270 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-vrs25" Nov 28 15:21:40 crc kubenswrapper[4884]: 
Nov 28 15:21:40 crc kubenswrapper[4884]: I1128 15:21:40.969560 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:40 crc kubenswrapper[4884]: E1128 15:21:40.970410 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.470390416 +0000 UTC m=+141.033174217 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.011961 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-pztqc" podStartSLOduration=122.011947582 podStartE2EDuration="2m2.011947582s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:41.01074102 +0000 UTC m=+140.573524821" watchObservedRunningTime="2025-11-28 15:21:41.011947582 +0000 UTC m=+140.574731393"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.038029 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" podStartSLOduration=123.038007455 podStartE2EDuration="2m3.038007455s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:41.034324247 +0000 UTC m=+140.597108048" watchObservedRunningTime="2025-11-28 15:21:41.038007455 +0000 UTC m=+140.600791256"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.074263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.075105 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.57507489 +0000 UTC m=+141.137858801 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.149954 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vjntz" podStartSLOduration=122.149934981 podStartE2EDuration="2m2.149934981s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:41.117867659 +0000 UTC m=+140.680651460" watchObservedRunningTime="2025-11-28 15:21:41.149934981 +0000 UTC m=+140.712718782"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.175911 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.176391 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.676376665 +0000 UTC m=+141.239160466 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.263611 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" podStartSLOduration=122.263590213 podStartE2EDuration="2m2.263590213s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:41.230949576 +0000 UTC m=+140.793733377" watchObservedRunningTime="2025-11-28 15:21:41.263590213 +0000 UTC m=+140.826374014"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.278714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.279133 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.779117376 +0000 UTC m=+141.341901177 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.354885 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8wcfx"]
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.355781 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: W1128 15:21:41.359831 4884 reflector.go:561] object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g": failed to list *v1.Secret: secrets "certified-operators-dockercfg-4rs5g" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.359879 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-4rs5g\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"certified-operators-dockercfg-4rs5g\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.373414 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8wcfx"]
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.379678 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.379857 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.879836175 +0000 UTC m=+141.442619976 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.379936 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.380291 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.880279786 +0000 UTC m=+141.443063587 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.480700 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.480841 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.98082418 +0000 UTC m=+141.543607981 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.480918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-catalog-content\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.480938 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-utilities\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.480990 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ttmk\" (UniqueName: \"kubernetes.io/projected/52b0dffb-d746-416e-9494-6562cb444a5b-kube-api-access-5ttmk\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.481039 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.485371 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:41.98535903 +0000 UTC m=+141.548142831 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.520409 4884 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.547985 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b6kf2"]
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.549073 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6kf2"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.552399 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.588677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.588986 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-catalog-content\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.589016 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-utilities\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.589111 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ttmk\" (UniqueName: \"kubernetes.io/projected/52b0dffb-d746-416e-9494-6562cb444a5b-kube-api-access-5ttmk\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.590177 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:42.090157897 +0000 UTC m=+141.652941698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.599326 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-catalog-content\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.603602 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-utilities\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.644563 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ttmk\" (UniqueName: \"kubernetes.io/projected/52b0dffb-d746-416e-9494-6562cb444a5b-kube-api-access-5ttmk\") pod \"certified-operators-8wcfx\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " pod="openshift-marketplace/certified-operators-8wcfx"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.649026 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b6kf2"]
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.691250 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-catalog-content\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.691298 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k98vg\" (UniqueName: \"kubernetes.io/projected/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-kube-api-access-k98vg\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.691346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4"
Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.691436 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-utilities\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2"
Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.691756
4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:42.191744209 +0000 UTC m=+141.754528010 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.744566 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mvrfd"] Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.745782 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.746318 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:41 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:41 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:41 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.746364 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.760335 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mvrfd"] Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.794536 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.794667 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 15:21:42.294650165 +0000 UTC m=+141.857433966 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.794739 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-utilities\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.794807 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-catalog-content\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.794855 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k98vg\" (UniqueName: \"kubernetes.io/projected/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-kube-api-access-k98vg\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.794898 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.795133 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-utilities\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.795210 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-catalog-content\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: E1128 15:21:41.795220 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 15:21:42.29520931 +0000 UTC m=+141.857993121 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-ls7t4" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.832149 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k98vg\" (UniqueName: \"kubernetes.io/projected/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-kube-api-access-k98vg\") pod \"community-operators-b6kf2\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.866764 4884 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T15:21:41.520438933Z","Handler":null,"Name":""} Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.868972 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.869664 4884 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.869698 4884 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.895761 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.896521 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-utilities\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.896594 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsdkb\" (UniqueName: \"kubernetes.io/projected/1bb36abc-c535-4974-9240-80a698c0eb5d-kube-api-access-fsdkb\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.896668 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-catalog-content\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.923644 4884 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.956165 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hjgq4"] Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.957109 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.957453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" event={"ID":"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e","Type":"ContainerStarted","Data":"b4193047625e90e29d361d3aad7bbdd1fd166ee33b1bab7df8eaea33819a5ffc"} Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.957476 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" event={"ID":"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e","Type":"ContainerStarted","Data":"95607e462e4c1eb461d01f105a161f440a9641905feb9b8755ca846f468dbc80"} Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.957487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" event={"ID":"c583b5ab-7db2-4bf8-a644-2abe08ab7e4e","Type":"ContainerStarted","Data":"51be654c72c4d39a5de04f25098bb5cf4e7304fc7841781024742f60576207fa"} Nov 28 15:21:41 crc kubenswrapper[4884]: I1128 15:21:41.992746 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hjgq4"] Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.003670 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.003761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-catalog-content\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.003824 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-utilities\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.003849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsdkb\" (UniqueName: \"kubernetes.io/projected/1bb36abc-c535-4974-9240-80a698c0eb5d-kube-api-access-fsdkb\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " 
pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.004945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-catalog-content\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.005168 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-utilities\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.019621 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.019667 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.062037 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsdkb\" (UniqueName: \"kubernetes.io/projected/1bb36abc-c535-4974-9240-80a698c0eb5d-kube-api-access-fsdkb\") pod \"certified-operators-mvrfd\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.081393 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-pfrvj" podStartSLOduration=10.081377369 podStartE2EDuration="10.081377369s" podCreationTimestamp="2025-11-28 15:21:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:42.079479769 +0000 UTC m=+141.642263570" watchObservedRunningTime="2025-11-28 15:21:42.081377369 +0000 UTC m=+141.644161170" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.105115 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-utilities\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.105272 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85wjj\" (UniqueName: \"kubernetes.io/projected/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-kube-api-access-85wjj\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.105609 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-catalog-content\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.198053 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b6kf2"] Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.206794 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-utilities\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.207220 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85wjj\" (UniqueName: \"kubernetes.io/projected/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-kube-api-access-85wjj\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.207291 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-catalog-content\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.207465 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-utilities\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.207638 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-catalog-content\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.227470 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85wjj\" (UniqueName: \"kubernetes.io/projected/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-kube-api-access-85wjj\") pod \"community-operators-hjgq4\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.251774 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.253400 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.257327 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.281275 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.488647 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mvrfd"] Nov 28 15:21:42 crc kubenswrapper[4884]: W1128 15:21:42.496897 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bb36abc_c535_4974_9240_80a698c0eb5d.slice/crio-64c326026160895d837c3f63c75b1e178c20b4f7b6cdb558134bbcaf4a36e4f6 WatchSource:0}: Error finding container 64c326026160895d837c3f63c75b1e178c20b4f7b6cdb558134bbcaf4a36e4f6: Status 404 returned error can't find the container with id 64c326026160895d837c3f63c75b1e178c20b4f7b6cdb558134bbcaf4a36e4f6 Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.554295 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-ls7t4\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.708219 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.724552 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hjgq4"] Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.733852 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8wcfx"] Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.741781 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:42 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:42 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:42 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.741821 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.773832 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:42 crc kubenswrapper[4884]: W1128 15:21:42.820463 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52b0dffb_d746_416e_9494_6562cb444a5b.slice/crio-689471d40e183179577ffbd52e57e03fdabb6483e0910180fb207a471804f396 WatchSource:0}: Error finding container 689471d40e183179577ffbd52e57e03fdabb6483e0910180fb207a471804f396: Status 404 returned error can't find the container with id 689471d40e183179577ffbd52e57e03fdabb6483e0910180fb207a471804f396 Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.964856 4884 generic.go:334] "Generic (PLEG): container finished" podID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerID="a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73" exitCode=0 Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.964952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6kf2" event={"ID":"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b","Type":"ContainerDied","Data":"a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.965424 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6kf2" event={"ID":"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b","Type":"ContainerStarted","Data":"8ad9b87145dd3030a1187d904d745a78aad0fc8aafb2f9797cd358ca4140b7fd"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.968309 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.975892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerStarted","Data":"246a5391d67581e21b48bd40db8250cc61f92f83fc9c2b715d2aaf808c181213"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.975961 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerStarted","Data":"fbc973b88c37bb4546a464121e4dfa4807651873b8dace07f3bf51e2814a22d9"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.979285 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerStarted","Data":"4bf4ab95b5af12e7644956d87d612b67b66f61fc1669265f49898e3972d626fe"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.979315 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerStarted","Data":"689471d40e183179577ffbd52e57e03fdabb6483e0910180fb207a471804f396"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.982878 4884 generic.go:334] "Generic (PLEG): container finished" podID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerID="78191450d30dec958f04381afa3f59cd07c5b7db3f7ab08cc2974525999f3b84" exitCode=0 Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.983176 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mvrfd" 
event={"ID":"1bb36abc-c535-4974-9240-80a698c0eb5d","Type":"ContainerDied","Data":"78191450d30dec958f04381afa3f59cd07c5b7db3f7ab08cc2974525999f3b84"} Nov 28 15:21:42 crc kubenswrapper[4884]: I1128 15:21:42.983205 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mvrfd" event={"ID":"1bb36abc-c535-4974-9240-80a698c0eb5d","Type":"ContainerStarted","Data":"64c326026160895d837c3f63c75b1e178c20b4f7b6cdb558134bbcaf4a36e4f6"} Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.001234 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ls7t4"] Nov 28 15:21:43 crc kubenswrapper[4884]: W1128 15:21:43.097772 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc42ab1b_612c_4c42_a6ab_ff39c1908565.slice/crio-a555bc1a449c7d3df8350b60250f59531498a8d42153a6d3fef0f8998363d193 WatchSource:0}: Error finding container a555bc1a449c7d3df8350b60250f59531498a8d42153a6d3fef0f8998363d193: Status 404 returned error can't find the container with id a555bc1a449c7d3df8350b60250f59531498a8d42153a6d3fef0f8998363d193 Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.342210 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-22bgs"] Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.343688 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.345975 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.402613 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bgs"] Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.424765 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-utilities\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.424816 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-catalog-content\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.424836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dczv\" (UniqueName: \"kubernetes.io/projected/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-kube-api-access-4dczv\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.515218 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.522544 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-66tg2" Nov 28 
15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.526118 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-utilities\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.526170 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-catalog-content\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.526205 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dczv\" (UniqueName: \"kubernetes.io/projected/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-kube-api-access-4dczv\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.526847 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-utilities\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.526901 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-catalog-content\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.560141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dczv\" (UniqueName: \"kubernetes.io/projected/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-kube-api-access-4dczv\") pod \"redhat-marketplace-22bgs\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.664126 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.740043 4884 patch_prober.go:28] interesting pod/router-default-5444994796-rhppm container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 15:21:43 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Nov 28 15:21:43 crc kubenswrapper[4884]: [+]process-running ok Nov 28 15:21:43 crc kubenswrapper[4884]: healthz check failed Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.740104 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rhppm" podUID="5b49f112-ca99-4bb7-9aaf-3aff77fbedaf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.740848 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nvnc4"] Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.741871 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.748903 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nvnc4"] Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.829859 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-catalog-content\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.830113 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v9g2\" (UniqueName: \"kubernetes.io/projected/c634ff01-2f2a-491b-908c-e1525ee8715a-kube-api-access-8v9g2\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.830191 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-utilities\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.931258 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-utilities\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.931375 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-catalog-content\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.931398 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-8v9g2\" (UniqueName: \"kubernetes.io/projected/c634ff01-2f2a-491b-908c-e1525ee8715a-kube-api-access-8v9g2\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.931942 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-catalog-content\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:43 crc kubenswrapper[4884]: I1128 15:21:43.931966 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-utilities\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.005188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" event={"ID":"fc42ab1b-612c-4c42-a6ab-ff39c1908565","Type":"ContainerStarted","Data":"724a33aaa6ef4d14ccee24172dff6fa9a91c1834cc2d0398f86f79915bfabf36"} Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.005259 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" event={"ID":"fc42ab1b-612c-4c42-a6ab-ff39c1908565","Type":"ContainerStarted","Data":"a555bc1a449c7d3df8350b60250f59531498a8d42153a6d3fef0f8998363d193"} Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.005399 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.011714 4884 generic.go:334] "Generic (PLEG): container finished" podID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerID="246a5391d67581e21b48bd40db8250cc61f92f83fc9c2b715d2aaf808c181213" exitCode=0 Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.011822 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerDied","Data":"246a5391d67581e21b48bd40db8250cc61f92f83fc9c2b715d2aaf808c181213"} Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.021724 4884 generic.go:334] "Generic (PLEG): container finished" podID="52b0dffb-d746-416e-9494-6562cb444a5b" containerID="4bf4ab95b5af12e7644956d87d612b67b66f61fc1669265f49898e3972d626fe" exitCode=0 Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.021876 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerDied","Data":"4bf4ab95b5af12e7644956d87d612b67b66f61fc1669265f49898e3972d626fe"} Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.026648 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.027304 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.031879 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.032011 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.038172 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v9g2\" (UniqueName: \"kubernetes.io/projected/c634ff01-2f2a-491b-908c-e1525ee8715a-kube-api-access-8v9g2\") pod \"redhat-marketplace-nvnc4\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.038984 4884 generic.go:334] "Generic (PLEG): container finished" podID="bd32abac-bba6-4b9e-bf45-60afad5b0e9e" containerID="22a5a3e84ca91cb76bdf3e2031cab324b8aa7d47c5e5bd47d1f01ec1ac2aa42c" exitCode=0 Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.039929 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" event={"ID":"bd32abac-bba6-4b9e-bf45-60afad5b0e9e","Type":"ContainerDied","Data":"22a5a3e84ca91cb76bdf3e2031cab324b8aa7d47c5e5bd47d1f01ec1ac2aa42c"} Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.039962 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.047751 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" podStartSLOduration=125.047727937 podStartE2EDuration="2m5.047727937s" podCreationTimestamp="2025-11-28 15:19:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:44.032314867 +0000 UTC m=+143.595098668" watchObservedRunningTime="2025-11-28 15:21:44.047727937 +0000 UTC m=+143.610511748" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.076310 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.105498 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bgs"] Nov 28 15:21:44 crc kubenswrapper[4884]: W1128 15:21:44.128045 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc6aa808_4684_4ba5_93bd_cb8ba9edca63.slice/crio-6e404c894926c58cc31441d0a44726441acc07622dcfad884a45455894515552 WatchSource:0}: Error finding container 6e404c894926c58cc31441d0a44726441acc07622dcfad884a45455894515552: Status 404 returned error can't find the container with id 6e404c894926c58cc31441d0a44726441acc07622dcfad884a45455894515552 Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.137544 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fa33f803-d448-44d0-8422-1e90c88ab468-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.138034 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fa33f803-d448-44d0-8422-1e90c88ab468-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.244077 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fa33f803-d448-44d0-8422-1e90c88ab468-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.244652 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fa33f803-d448-44d0-8422-1e90c88ab468-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.245266 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fa33f803-d448-44d0-8422-1e90c88ab468-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.280657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fa33f803-d448-44d0-8422-1e90c88ab468-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.377646 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.570748 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rv8wt"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.571808 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.575560 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.597129 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rv8wt"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.616332 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.616987 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.623371 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nvnc4"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.638720 4884 patch_prober.go:28] interesting pod/console-f9d7485db-tqjn2 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.638775 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-tqjn2" podUID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.649017 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-utilities\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.649160 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-catalog-content\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.649180 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95466\" (UniqueName: \"kubernetes.io/projected/0327ad91-6ead-42fe-9911-c0eaa52128f7-kube-api-access-95466\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.668772 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-2m5q7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: 
connection refused" start-of-body= Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.668808 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2m5q7" podUID="a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.669054 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-2m5q7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.669069 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2m5q7" podUID="a5e68c28-e4f3-4e6b-b80d-0dbd49afdcb5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.35:8080/\": dial tcp 10.217.0.35:8080: connect: connection refused" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.738450 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.749808 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-catalog-content\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.749849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95466\" (UniqueName: \"kubernetes.io/projected/0327ad91-6ead-42fe-9911-c0eaa52128f7-kube-api-access-95466\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.749918 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-utilities\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.750408 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-utilities\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.750673 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-catalog-content\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.778391 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.778875 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-95466\" (UniqueName: \"kubernetes.io/projected/0327ad91-6ead-42fe-9911-c0eaa52128f7-kube-api-access-95466\") pod \"redhat-operators-rv8wt\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.945498 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jv7j2"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.946561 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.959420 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.968600 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jv7j2"] Nov 28 15:21:44 crc kubenswrapper[4884]: I1128 15:21:44.986146 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.058136 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwnwc\" (UniqueName: \"kubernetes.io/projected/6e7cdd36-55ff-41fd-bf5f-393429f8470b-kube-api-access-lwnwc\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.058183 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-catalog-content\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.058203 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-utilities\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.088425 4884 generic.go:334] "Generic (PLEG): container finished" podID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerID="4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae" exitCode=0 Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.088490 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerDied","Data":"4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae"} Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.088775 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerStarted","Data":"6d8a674cfcd144f07c403e4fcff718ec036012b65a8ef7388f79feaffad517b3"} Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.105536 4884 generic.go:334] "Generic (PLEG): container finished" podID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerID="4416ec7450c2ad1ae55283471ea976574e5b041edf17e6467906b5017d466623" exitCode=0 Nov 28 
15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.105599 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bgs" event={"ID":"fc6aa808-4684-4ba5-93bd-cb8ba9edca63","Type":"ContainerDied","Data":"4416ec7450c2ad1ae55283471ea976574e5b041edf17e6467906b5017d466623"} Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.105623 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bgs" event={"ID":"fc6aa808-4684-4ba5-93bd-cb8ba9edca63","Type":"ContainerStarted","Data":"6e404c894926c58cc31441d0a44726441acc07622dcfad884a45455894515552"} Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.107954 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"fa33f803-d448-44d0-8422-1e90c88ab468","Type":"ContainerStarted","Data":"779bd26e6ef98b6d867f89b07472f5a1d2237f439a10b6905599a67049047bd7"} Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.131825 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-rhppm" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.159398 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwnwc\" (UniqueName: \"kubernetes.io/projected/6e7cdd36-55ff-41fd-bf5f-393429f8470b-kube-api-access-lwnwc\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.159442 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-catalog-content\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.159457 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-utilities\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.161998 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-utilities\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.163017 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-catalog-content\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.189371 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwnwc\" (UniqueName: \"kubernetes.io/projected/6e7cdd36-55ff-41fd-bf5f-393429f8470b-kube-api-access-lwnwc\") pod \"redhat-operators-jv7j2\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.200652 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.292593 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.356364 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.439195 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rv8wt"] Nov 28 15:21:45 crc kubenswrapper[4884]: W1128 15:21:45.478352 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0327ad91_6ead_42fe_9911_c0eaa52128f7.slice/crio-0eb9586c1fa1e28bfd0319954d94dece0c030c10b711d3c35748dc1488ff459a WatchSource:0}: Error finding container 0eb9586c1fa1e28bfd0319954d94dece0c030c10b711d3c35748dc1488ff459a: Status 404 returned error can't find the container with id 0eb9586c1fa1e28bfd0319954d94dece0c030c10b711d3c35748dc1488ff459a Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.641331 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.670231 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jv7j2"] Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.769597 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-secret-volume\") pod \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.769658 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-config-volume\") pod \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.769704 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncgpk\" (UniqueName: \"kubernetes.io/projected/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-kube-api-access-ncgpk\") pod \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\" (UID: \"bd32abac-bba6-4b9e-bf45-60afad5b0e9e\") " Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.774970 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-config-volume" (OuterVolumeSpecName: "config-volume") pod "bd32abac-bba6-4b9e-bf45-60afad5b0e9e" (UID: "bd32abac-bba6-4b9e-bf45-60afad5b0e9e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.777244 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bd32abac-bba6-4b9e-bf45-60afad5b0e9e" (UID: "bd32abac-bba6-4b9e-bf45-60afad5b0e9e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.777465 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-kube-api-access-ncgpk" (OuterVolumeSpecName: "kube-api-access-ncgpk") pod "bd32abac-bba6-4b9e-bf45-60afad5b0e9e" (UID: "bd32abac-bba6-4b9e-bf45-60afad5b0e9e"). InnerVolumeSpecName "kube-api-access-ncgpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.875072 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncgpk\" (UniqueName: \"kubernetes.io/projected/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-kube-api-access-ncgpk\") on node \"crc\" DevicePath \"\"" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.875115 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:21:45 crc kubenswrapper[4884]: I1128 15:21:45.875125 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bd32abac-bba6-4b9e-bf45-60afad5b0e9e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.125653 4884 generic.go:334] "Generic (PLEG): container finished" podID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerID="6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d" exitCode=0 Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.125713 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerDied","Data":"6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d"} Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.125736 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerStarted","Data":"6fd818e9bc5ea8c50b089f135b25061c2aba269fa8bb86018cf868d6996f811f"} Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.135923 4884 generic.go:334] "Generic (PLEG): container finished" podID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerID="6d76e8ed2496bfac58516c82c9a6c73da7bb3c2b78d279b1158edcc24df90bf8" exitCode=0 Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.136005 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerDied","Data":"6d76e8ed2496bfac58516c82c9a6c73da7bb3c2b78d279b1158edcc24df90bf8"} Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.136029 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerStarted","Data":"0eb9586c1fa1e28bfd0319954d94dece0c030c10b711d3c35748dc1488ff459a"} Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.143951 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"fa33f803-d448-44d0-8422-1e90c88ab468","Type":"ContainerStarted","Data":"703740560d4ab10ad881aef9eb7045d7d0f5571cc42481b6c1c899dcfb113ba3"} Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.157429 4884 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" event={"ID":"bd32abac-bba6-4b9e-bf45-60afad5b0e9e","Type":"ContainerDied","Data":"f72004a53add76ffa5810be395204e20e68bae9c5e2404b8fcb11027661db2b2"} Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.157482 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f72004a53add76ffa5810be395204e20e68bae9c5e2404b8fcb11027661db2b2" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.157515 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.200701 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.200675838 podStartE2EDuration="2.200675838s" podCreationTimestamp="2025-11-28 15:21:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:46.19286869 +0000 UTC m=+145.755652491" watchObservedRunningTime="2025-11-28 15:21:46.200675838 +0000 UTC m=+145.763459649" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.698450 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.699051 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.699697 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.705838 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.801557 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.801601 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.805723 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.806212 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.888247 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.895390 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:46 crc kubenswrapper[4884]: I1128 15:21:46.903043 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 15:21:47 crc kubenswrapper[4884]: I1128 15:21:47.177972 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa33f803-d448-44d0-8422-1e90c88ab468" containerID="703740560d4ab10ad881aef9eb7045d7d0f5571cc42481b6c1c899dcfb113ba3" exitCode=0 Nov 28 15:21:47 crc kubenswrapper[4884]: I1128 15:21:47.178014 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"fa33f803-d448-44d0-8422-1e90c88ab468","Type":"ContainerDied","Data":"703740560d4ab10ad881aef9eb7045d7d0f5571cc42481b6c1c899dcfb113ba3"} Nov 28 15:21:47 crc kubenswrapper[4884]: W1128 15:21:47.585203 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-93ffeee766f6a7f7db3f0f408fd32b5d8767555199a5f9fd88fcf1a29683fa75 WatchSource:0}: Error finding container 93ffeee766f6a7f7db3f0f408fd32b5d8767555199a5f9fd88fcf1a29683fa75: Status 404 returned error can't find the container with id 93ffeee766f6a7f7db3f0f408fd32b5d8767555199a5f9fd88fcf1a29683fa75 Nov 28 15:21:47 crc kubenswrapper[4884]: W1128 15:21:47.589518 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-5c4d03f4c182c01af1a7d9674da144c8aceea1a30a1071afd154e9a3596878cc WatchSource:0}: Error finding container 5c4d03f4c182c01af1a7d9674da144c8aceea1a30a1071afd154e9a3596878cc: Status 404 returned error can't find the container with id 5c4d03f4c182c01af1a7d9674da144c8aceea1a30a1071afd154e9a3596878cc Nov 28 15:21:47 crc kubenswrapper[4884]: W1128 15:21:47.732591 4884 manager.go:1169] Failed 
to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-822b582904418c572ba61289a282142efcaa548b837d89b443b24b57eb2d77e0 WatchSource:0}: Error finding container 822b582904418c572ba61289a282142efcaa548b837d89b443b24b57eb2d77e0: Status 404 returned error can't find the container with id 822b582904418c572ba61289a282142efcaa548b837d89b443b24b57eb2d77e0 Nov 28 15:21:48 crc kubenswrapper[4884]: I1128 15:21:48.227607 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5c4d03f4c182c01af1a7d9674da144c8aceea1a30a1071afd154e9a3596878cc"} Nov 28 15:21:48 crc kubenswrapper[4884]: I1128 15:21:48.234941 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"822b582904418c572ba61289a282142efcaa548b837d89b443b24b57eb2d77e0"} Nov 28 15:21:48 crc kubenswrapper[4884]: I1128 15:21:48.237145 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"93ffeee766f6a7f7db3f0f408fd32b5d8767555199a5f9fd88fcf1a29683fa75"} Nov 28 15:21:48 crc kubenswrapper[4884]: I1128 15:21:48.944583 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.039260 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fa33f803-d448-44d0-8422-1e90c88ab468-kubelet-dir\") pod \"fa33f803-d448-44d0-8422-1e90c88ab468\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.039355 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fa33f803-d448-44d0-8422-1e90c88ab468-kube-api-access\") pod \"fa33f803-d448-44d0-8422-1e90c88ab468\" (UID: \"fa33f803-d448-44d0-8422-1e90c88ab468\") " Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.040366 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fa33f803-d448-44d0-8422-1e90c88ab468-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "fa33f803-d448-44d0-8422-1e90c88ab468" (UID: "fa33f803-d448-44d0-8422-1e90c88ab468"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.083312 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa33f803-d448-44d0-8422-1e90c88ab468-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "fa33f803-d448-44d0-8422-1e90c88ab468" (UID: "fa33f803-d448-44d0-8422-1e90c88ab468"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.118283 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 15:21:49 crc kubenswrapper[4884]: E1128 15:21:49.118501 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd32abac-bba6-4b9e-bf45-60afad5b0e9e" containerName="collect-profiles" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.118514 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd32abac-bba6-4b9e-bf45-60afad5b0e9e" containerName="collect-profiles" Nov 28 15:21:49 crc kubenswrapper[4884]: E1128 15:21:49.118528 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa33f803-d448-44d0-8422-1e90c88ab468" containerName="pruner" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.118535 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa33f803-d448-44d0-8422-1e90c88ab468" containerName="pruner" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.118646 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd32abac-bba6-4b9e-bf45-60afad5b0e9e" containerName="collect-profiles" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.118662 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa33f803-d448-44d0-8422-1e90c88ab468" containerName="pruner" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.119003 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.132644 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.132833 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.141005 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fa33f803-d448-44d0-8422-1e90c88ab468-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.141029 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fa33f803-d448-44d0-8422-1e90c88ab468-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.175973 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.243615 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30855778-592d-4996-aba0-e136fd83ca5f-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.243704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/30855778-592d-4996-aba0-e136fd83ca5f-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.312850 4884 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"fa33f803-d448-44d0-8422-1e90c88ab468","Type":"ContainerDied","Data":"779bd26e6ef98b6d867f89b07472f5a1d2237f439a10b6905599a67049047bd7"} Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.312882 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="779bd26e6ef98b6d867f89b07472f5a1d2237f439a10b6905599a67049047bd7" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.312890 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.358774 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/30855778-592d-4996-aba0-e136fd83ca5f-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.359195 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30855778-592d-4996-aba0-e136fd83ca5f-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.359555 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/30855778-592d-4996-aba0-e136fd83ca5f-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.383236 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30855778-592d-4996-aba0-e136fd83ca5f-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:49 crc kubenswrapper[4884]: I1128 15:21:49.490840 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.282919 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.346293 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f3395cb59116c0f7042f2d933ce7b718fbf07cb715382241f0beb78ce0cce5b5"} Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.346478 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.358334 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"30855778-592d-4996-aba0-e136fd83ca5f","Type":"ContainerStarted","Data":"2a5ea3ca9fb7b1ac5fdcce29a59d056188701e6a9c34fad880910946bbb64a23"} Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.361547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"83bd0686a6f4b2563a0259499c65bc8e7f1c20d7f3f99ba4609eb2966a4117cc"} Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.368704 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9762e2e69a5dc0e71603acde9fa180d1d35430681f8259f5b9504d1afbf0b6b3"} Nov 28 15:21:50 crc kubenswrapper[4884]: I1128 15:21:50.774411 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-mllmk" Nov 28 15:21:51 crc kubenswrapper[4884]: I1128 15:21:51.243630 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:21:51 crc kubenswrapper[4884]: I1128 15:21:51.243706 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:21:52 crc kubenswrapper[4884]: I1128 15:21:52.441508 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"30855778-592d-4996-aba0-e136fd83ca5f","Type":"ContainerStarted","Data":"2c1374b841b424b54a9605ee72273e37a6022faf05b9838f26c09a06b3a5ba59"} Nov 28 15:21:52 crc kubenswrapper[4884]: I1128 15:21:52.457769 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.457750682 podStartE2EDuration="3.457750682s" podCreationTimestamp="2025-11-28 15:21:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:21:52.455358548 +0000 UTC m=+152.018142349" 
watchObservedRunningTime="2025-11-28 15:21:52.457750682 +0000 UTC m=+152.020534493" Nov 28 15:21:53 crc kubenswrapper[4884]: I1128 15:21:53.456132 4884 generic.go:334] "Generic (PLEG): container finished" podID="30855778-592d-4996-aba0-e136fd83ca5f" containerID="2c1374b841b424b54a9605ee72273e37a6022faf05b9838f26c09a06b3a5ba59" exitCode=0 Nov 28 15:21:53 crc kubenswrapper[4884]: I1128 15:21:53.456182 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"30855778-592d-4996-aba0-e136fd83ca5f","Type":"ContainerDied","Data":"2c1374b841b424b54a9605ee72273e37a6022faf05b9838f26c09a06b3a5ba59"} Nov 28 15:21:54 crc kubenswrapper[4884]: I1128 15:21:54.672214 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-2m5q7" Nov 28 15:21:54 crc kubenswrapper[4884]: I1128 15:21:54.757658 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:21:54 crc kubenswrapper[4884]: I1128 15:21:54.761567 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:22:01 crc kubenswrapper[4884]: I1128 15:22:01.600701 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:22:01 crc kubenswrapper[4884]: I1128 15:22:01.611131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/104ccc20-6082-4bdc-bdc7-591fa0b2b2d4-metrics-certs\") pod \"network-metrics-daemon-5nbz9\" (UID: \"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4\") " pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:22:01 crc kubenswrapper[4884]: I1128 15:22:01.906214 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-5nbz9" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.554419 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"30855778-592d-4996-aba0-e136fd83ca5f","Type":"ContainerDied","Data":"2a5ea3ca9fb7b1ac5fdcce29a59d056188701e6a9c34fad880910946bbb64a23"} Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.554633 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a5ea3ca9fb7b1ac5fdcce29a59d056188701e6a9c34fad880910946bbb64a23" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.562011 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.625225 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30855778-592d-4996-aba0-e136fd83ca5f-kube-api-access\") pod \"30855778-592d-4996-aba0-e136fd83ca5f\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.625396 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/30855778-592d-4996-aba0-e136fd83ca5f-kubelet-dir\") pod \"30855778-592d-4996-aba0-e136fd83ca5f\" (UID: \"30855778-592d-4996-aba0-e136fd83ca5f\") " Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.625614 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/30855778-592d-4996-aba0-e136fd83ca5f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "30855778-592d-4996-aba0-e136fd83ca5f" (UID: "30855778-592d-4996-aba0-e136fd83ca5f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.634627 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30855778-592d-4996-aba0-e136fd83ca5f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "30855778-592d-4996-aba0-e136fd83ca5f" (UID: "30855778-592d-4996-aba0-e136fd83ca5f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.726439 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/30855778-592d-4996-aba0-e136fd83ca5f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.726479 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/30855778-592d-4996-aba0-e136fd83ca5f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:02 crc kubenswrapper[4884]: I1128 15:22:02.781248 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:22:03 crc kubenswrapper[4884]: I1128 15:22:03.558673 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 15:22:15 crc kubenswrapper[4884]: I1128 15:22:15.407532 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-db7mb" Nov 28 15:22:21 crc kubenswrapper[4884]: I1128 15:22:21.243713 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:22:21 crc kubenswrapper[4884]: I1128 15:22:21.244958 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:22:23 crc kubenswrapper[4884]: E1128 15:22:23.620181 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 15:22:23 crc kubenswrapper[4884]: E1128 15:22:23.620428 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k98vg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-b6kf2_openshift-marketplace(ba35f9f9-8eda-4aee-9cd8-7140421e5a2b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:23 crc kubenswrapper[4884]: E1128 15:22:23.621838 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: 
context canceled\"" pod="openshift-marketplace/community-operators-b6kf2" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.108466 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.108789 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-85wjj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-hjgq4_openshift-marketplace(df63a7fe-4e81-4806-8b52-9f0f12ad7e43): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.110294 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-hjgq4" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.134578 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-b6kf2" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.198805 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.198978 4884 kuberuntime_manager.go:1274] "Unhandled Error" 
err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8v9g2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-nvnc4_openshift-marketplace(c634ff01-2f2a-491b-908c-e1525ee8715a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.200183 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-nvnc4" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.207643 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.207802 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4dczv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-22bgs_openshift-marketplace(fc6aa808-4684-4ba5-93bd-cb8ba9edca63): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.208960 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-22bgs" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.687031 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 15:22:24 crc kubenswrapper[4884]: E1128 15:22:24.687248 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30855778-592d-4996-aba0-e136fd83ca5f" containerName="pruner" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.687259 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="30855778-592d-4996-aba0-e136fd83ca5f" containerName="pruner" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.687344 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="30855778-592d-4996-aba0-e136fd83ca5f" containerName="pruner" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.690572 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.692951 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.693005 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.709592 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.741874 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c3df941-8d76-47c9-b43b-98c5ecedea57-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.741948 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c3df941-8d76-47c9-b43b-98c5ecedea57-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.843082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c3df941-8d76-47c9-b43b-98c5ecedea57-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.843196 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c3df941-8d76-47c9-b43b-98c5ecedea57-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.843202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c3df941-8d76-47c9-b43b-98c5ecedea57-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:24 crc kubenswrapper[4884]: I1128 15:22:24.861421 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c3df941-8d76-47c9-b43b-98c5ecedea57-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:25 crc kubenswrapper[4884]: I1128 15:22:25.021579 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.647640 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-nvnc4" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.647654 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-hjgq4" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.647662 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-22bgs" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.721656 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.721812 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ttmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-8wcfx_openshift-marketplace(52b0dffb-d746-416e-9494-6562cb444a5b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.723004 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-8wcfx" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.736954 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.737126 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fsdkb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-mvrfd_openshift-marketplace(1bb36abc-c535-4974-9240-80a698c0eb5d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:25 crc kubenswrapper[4884]: E1128 15:22:25.738414 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-mvrfd" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" Nov 28 15:22:26 crc kubenswrapper[4884]: I1128 15:22:26.898665 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.415459 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-8wcfx" 
podUID="52b0dffb-d746-416e-9494-6562cb444a5b" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.415606 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-mvrfd" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.504553 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.504955 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lwnwc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-jv7j2_openshift-marketplace(6e7cdd36-55ff-41fd-bf5f-393429f8470b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.507323 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-jv7j2" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.510879 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.510988 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-95466,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rv8wt_openshift-marketplace(0327ad91-6ead-42fe-9911-c0eaa52128f7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.512102 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-rv8wt" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.719965 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rv8wt" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" Nov 28 15:22:28 crc kubenswrapper[4884]: E1128 15:22:28.720051 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-jv7j2" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" Nov 28 15:22:28 crc kubenswrapper[4884]: I1128 15:22:28.855868 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-5nbz9"] Nov 28 15:22:28 crc kubenswrapper[4884]: I1128 15:22:28.869428 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 15:22:28 crc kubenswrapper[4884]: W1128 15:22:28.878214 4884 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-pod2c3df941_8d76_47c9_b43b_98c5ecedea57.slice/crio-7f621c28a84a58ef6da3b29b845b7e750f6608b34ff674a7a996ee54b10da718 WatchSource:0}: Error finding container 7f621c28a84a58ef6da3b29b845b7e750f6608b34ff674a7a996ee54b10da718: Status 404 returned error can't find the container with id 7f621c28a84a58ef6da3b29b845b7e750f6608b34ff674a7a996ee54b10da718 Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.727482 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" event={"ID":"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4","Type":"ContainerStarted","Data":"ca0eb21c5ec12ad3b9b1f2720b72c8396ee5997b53d5e8935d54b2164b8fa252"} Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.728116 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" event={"ID":"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4","Type":"ContainerStarted","Data":"b34a75044ca4eb704d926427b531c4a35ba3f5d556d7d3c8bb20ec85cb4590d1"} Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.728128 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-5nbz9" event={"ID":"104ccc20-6082-4bdc-bdc7-591fa0b2b2d4","Type":"ContainerStarted","Data":"75113f1023cc3209e1bc95a94084244fc06602053c8232746ee3278ace0d1f3a"} Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.729506 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2c3df941-8d76-47c9-b43b-98c5ecedea57","Type":"ContainerStarted","Data":"2c19df993064cfed4172f37a076c938c248a1d3919284e3fe5522a466379fbb8"} Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.729555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2c3df941-8d76-47c9-b43b-98c5ecedea57","Type":"ContainerStarted","Data":"7f621c28a84a58ef6da3b29b845b7e750f6608b34ff674a7a996ee54b10da718"} Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.755670 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-5nbz9" podStartSLOduration=171.755630851 podStartE2EDuration="2m51.755630851s" podCreationTimestamp="2025-11-28 15:19:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:22:29.746006926 +0000 UTC m=+189.308790727" watchObservedRunningTime="2025-11-28 15:22:29.755630851 +0000 UTC m=+189.318414702" Nov 28 15:22:29 crc kubenswrapper[4884]: I1128 15:22:29.767698 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=5.767670811 podStartE2EDuration="5.767670811s" podCreationTimestamp="2025-11-28 15:22:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:22:29.761492737 +0000 UTC m=+189.324276558" watchObservedRunningTime="2025-11-28 15:22:29.767670811 +0000 UTC m=+189.330454652" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.090896 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.092873 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.119019 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-var-lock\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.119156 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kube-api-access\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.119178 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.122713 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.220178 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kube-api-access\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.220238 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.220314 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-var-lock\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.220409 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-var-lock\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.220414 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.245077 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.420563 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.736254 4884 generic.go:334] "Generic (PLEG): container finished" podID="2c3df941-8d76-47c9-b43b-98c5ecedea57" containerID="2c19df993064cfed4172f37a076c938c248a1d3919284e3fe5522a466379fbb8" exitCode=0 Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.736312 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2c3df941-8d76-47c9-b43b-98c5ecedea57","Type":"ContainerDied","Data":"2c19df993064cfed4172f37a076c938c248a1d3919284e3fe5522a466379fbb8"} Nov 28 15:22:30 crc kubenswrapper[4884]: I1128 15:22:30.848280 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 15:22:30 crc kubenswrapper[4884]: W1128 15:22:30.857864 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod4f1f9e01_a0ef_48d8_9f42_dcb87c799e5f.slice/crio-3297acc61d18fd6f66405e287bc321163c25a8a7c8d3e81f62873a729803cb67 WatchSource:0}: Error finding container 3297acc61d18fd6f66405e287bc321163c25a8a7c8d3e81f62873a729803cb67: Status 404 returned error can't find the container with id 3297acc61d18fd6f66405e287bc321163c25a8a7c8d3e81f62873a729803cb67 Nov 28 15:22:31 crc kubenswrapper[4884]: I1128 15:22:31.745839 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f","Type":"ContainerStarted","Data":"ae37c55d07ab4b462bc57698a859b0abc1a9078bba8b87d04e59b8c1e01d3092"} Nov 28 15:22:31 crc kubenswrapper[4884]: I1128 15:22:31.746200 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f","Type":"ContainerStarted","Data":"3297acc61d18fd6f66405e287bc321163c25a8a7c8d3e81f62873a729803cb67"} Nov 28 15:22:31 crc kubenswrapper[4884]: I1128 15:22:31.775800 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.775778809 podStartE2EDuration="1.775778809s" podCreationTimestamp="2025-11-28 15:22:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:22:31.770597922 +0000 UTC m=+191.333381733" watchObservedRunningTime="2025-11-28 15:22:31.775778809 +0000 UTC m=+191.338562610" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.031772 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.047692 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c3df941-8d76-47c9-b43b-98c5ecedea57-kubelet-dir\") pod \"2c3df941-8d76-47c9-b43b-98c5ecedea57\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.047812 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c3df941-8d76-47c9-b43b-98c5ecedea57-kube-api-access\") pod \"2c3df941-8d76-47c9-b43b-98c5ecedea57\" (UID: \"2c3df941-8d76-47c9-b43b-98c5ecedea57\") " Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.048729 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c3df941-8d76-47c9-b43b-98c5ecedea57-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2c3df941-8d76-47c9-b43b-98c5ecedea57" (UID: "2c3df941-8d76-47c9-b43b-98c5ecedea57"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.057503 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c3df941-8d76-47c9-b43b-98c5ecedea57-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2c3df941-8d76-47c9-b43b-98c5ecedea57" (UID: "2c3df941-8d76-47c9-b43b-98c5ecedea57"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.149609 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c3df941-8d76-47c9-b43b-98c5ecedea57-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.149656 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c3df941-8d76-47c9-b43b-98c5ecedea57-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.754527 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"2c3df941-8d76-47c9-b43b-98c5ecedea57","Type":"ContainerDied","Data":"7f621c28a84a58ef6da3b29b845b7e750f6608b34ff674a7a996ee54b10da718"} Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.754618 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f621c28a84a58ef6da3b29b845b7e750f6608b34ff674a7a996ee54b10da718" Nov 28 15:22:32 crc kubenswrapper[4884]: I1128 15:22:32.754566 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 15:22:37 crc kubenswrapper[4884]: I1128 15:22:37.777353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerStarted","Data":"aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9"} Nov 28 15:22:38 crc kubenswrapper[4884]: I1128 15:22:38.783275 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerStarted","Data":"ce8a757046e3bb42d8744deadd66f9e55a16ef11b76c4246b53af209104ac137"} Nov 28 15:22:38 crc kubenswrapper[4884]: I1128 15:22:38.785477 4884 generic.go:334] "Generic (PLEG): container finished" podID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerID="aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9" exitCode=0 Nov 28 15:22:38 crc kubenswrapper[4884]: I1128 15:22:38.785512 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerDied","Data":"aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9"} Nov 28 15:22:39 crc kubenswrapper[4884]: I1128 15:22:39.795940 4884 generic.go:334] "Generic (PLEG): container finished" podID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerID="ce8a757046e3bb42d8744deadd66f9e55a16ef11b76c4246b53af209104ac137" exitCode=0 Nov 28 15:22:39 crc kubenswrapper[4884]: I1128 15:22:39.796027 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerDied","Data":"ce8a757046e3bb42d8744deadd66f9e55a16ef11b76c4246b53af209104ac137"} Nov 28 15:22:39 crc kubenswrapper[4884]: I1128 15:22:39.797927 4884 generic.go:334] "Generic (PLEG): container finished" podID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerID="3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922" exitCode=0 Nov 28 15:22:39 crc kubenswrapper[4884]: I1128 15:22:39.797953 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6kf2" event={"ID":"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b","Type":"ContainerDied","Data":"3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922"} Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.806547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6kf2" event={"ID":"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b","Type":"ContainerStarted","Data":"bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027"} Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.808853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerStarted","Data":"dff5a6146e4bef9c93ea8727ded634282c2c1268896d174813f93bd716d085d3"} Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.811119 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerStarted","Data":"a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811"} Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.812977 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerID="71c9a579495e856da43a8c29570c78b8ac2f43186cc4c52d9f5d6d76690e78d3" exitCode=0 Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.813015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bgs" event={"ID":"fc6aa808-4684-4ba5-93bd-cb8ba9edca63","Type":"ContainerDied","Data":"71c9a579495e856da43a8c29570c78b8ac2f43186cc4c52d9f5d6d76690e78d3"} Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.826377 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b6kf2" podStartSLOduration=2.398123853 podStartE2EDuration="59.826354394s" podCreationTimestamp="2025-11-28 15:21:41 +0000 UTC" firstStartedPulling="2025-11-28 15:21:42.967968535 +0000 UTC m=+142.530752336" lastFinishedPulling="2025-11-28 15:22:40.396199076 +0000 UTC m=+199.958982877" observedRunningTime="2025-11-28 15:22:40.825187363 +0000 UTC m=+200.387971194" watchObservedRunningTime="2025-11-28 15:22:40.826354394 +0000 UTC m=+200.389138195" Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.840847 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hjgq4" podStartSLOduration=3.513033095 podStartE2EDuration="59.840831874s" podCreationTimestamp="2025-11-28 15:21:41 +0000 UTC" firstStartedPulling="2025-11-28 15:21:44.014761681 +0000 UTC m=+143.577545482" lastFinishedPulling="2025-11-28 15:22:40.34256046 +0000 UTC m=+199.905344261" observedRunningTime="2025-11-28 15:22:40.839284174 +0000 UTC m=+200.402067995" watchObservedRunningTime="2025-11-28 15:22:40.840831874 +0000 UTC m=+200.403615675" Nov 28 15:22:40 crc kubenswrapper[4884]: I1128 15:22:40.887456 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nvnc4" podStartSLOduration=2.9518035080000002 podStartE2EDuration="57.887437836s" podCreationTimestamp="2025-11-28 15:21:43 +0000 UTC" firstStartedPulling="2025-11-28 15:21:45.090939557 +0000 UTC m=+144.653723358" lastFinishedPulling="2025-11-28 15:22:40.026573885 +0000 UTC m=+199.589357686" observedRunningTime="2025-11-28 15:22:40.885713861 +0000 UTC m=+200.448497662" watchObservedRunningTime="2025-11-28 15:22:40.887437836 +0000 UTC m=+200.450221637" Nov 28 15:22:41 crc kubenswrapper[4884]: I1128 15:22:41.819368 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bgs" event={"ID":"fc6aa808-4684-4ba5-93bd-cb8ba9edca63","Type":"ContainerStarted","Data":"339d1f2c0b3f44e245ca91e8e46021d0aa27c6381f3f031f77603d36dfdc1378"} Nov 28 15:22:41 crc kubenswrapper[4884]: I1128 15:22:41.835476 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-22bgs" podStartSLOduration=2.500617991 podStartE2EDuration="58.835445231s" podCreationTimestamp="2025-11-28 15:21:43 +0000 UTC" firstStartedPulling="2025-11-28 15:21:45.12487112 +0000 UTC m=+144.687654921" lastFinishedPulling="2025-11-28 15:22:41.45969836 +0000 UTC m=+201.022482161" observedRunningTime="2025-11-28 15:22:41.833818309 +0000 UTC m=+201.396602130" watchObservedRunningTime="2025-11-28 15:22:41.835445231 +0000 UTC m=+201.398229032" Nov 28 15:22:41 crc kubenswrapper[4884]: I1128 15:22:41.869562 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:22:41 crc kubenswrapper[4884]: I1128 
15:22:41.869855 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:22:42 crc kubenswrapper[4884]: I1128 15:22:42.281871 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:22:42 crc kubenswrapper[4884]: I1128 15:22:42.281991 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:22:42 crc kubenswrapper[4884]: I1128 15:22:42.940434 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-b6kf2" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="registry-server" probeResult="failure" output=< Nov 28 15:22:42 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:22:42 crc kubenswrapper[4884]: > Nov 28 15:22:43 crc kubenswrapper[4884]: I1128 15:22:43.319546 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-hjgq4" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="registry-server" probeResult="failure" output=< Nov 28 15:22:43 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:22:43 crc kubenswrapper[4884]: > Nov 28 15:22:43 crc kubenswrapper[4884]: I1128 15:22:43.664783 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:22:43 crc kubenswrapper[4884]: I1128 15:22:43.664864 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:22:43 crc kubenswrapper[4884]: I1128 15:22:43.716054 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:22:43 crc kubenswrapper[4884]: I1128 15:22:43.828027 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerStarted","Data":"d7dd41d05acc94f3c5984482ada0bdeaede674c3436493f611fb7fb9badd2815"} Nov 28 15:22:43 crc kubenswrapper[4884]: I1128 15:22:43.832861 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerStarted","Data":"00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d"} Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.077470 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.077513 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.118881 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.838813 4884 generic.go:334] "Generic (PLEG): container finished" podID="52b0dffb-d746-416e-9494-6562cb444a5b" containerID="d7dd41d05acc94f3c5984482ada0bdeaede674c3436493f611fb7fb9badd2815" exitCode=0 Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.838853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerDied","Data":"d7dd41d05acc94f3c5984482ada0bdeaede674c3436493f611fb7fb9badd2815"} Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.843734 4884 generic.go:334] "Generic (PLEG): container finished" podID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerID="00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d" exitCode=0 Nov 28 15:22:44 crc kubenswrapper[4884]: I1128 15:22:44.844110 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerDied","Data":"00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d"} Nov 28 15:22:45 crc kubenswrapper[4884]: I1128 15:22:45.852451 4884 generic.go:334] "Generic (PLEG): container finished" podID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerID="44b021888a135c4cab70589e4e02bfe61fd80706f0416fe9f9412760a63136c3" exitCode=0 Nov 28 15:22:45 crc kubenswrapper[4884]: I1128 15:22:45.852548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mvrfd" event={"ID":"1bb36abc-c535-4974-9240-80a698c0eb5d","Type":"ContainerDied","Data":"44b021888a135c4cab70589e4e02bfe61fd80706f0416fe9f9412760a63136c3"} Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.243232 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.243734 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.243786 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.244422 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.244544 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a" gracePeriod=600 Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.911293 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:22:51 crc kubenswrapper[4884]: I1128 15:22:51.950709 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:22:52 crc 
kubenswrapper[4884]: I1128 15:22:52.343734 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:22:52 crc kubenswrapper[4884]: I1128 15:22:52.395967 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:22:52 crc kubenswrapper[4884]: I1128 15:22:52.895035 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerStarted","Data":"ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2"} Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.142580 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hjgq4"] Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.699958 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.902562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerStarted","Data":"057fda1dc83274fb0b7fcd966d5f822c474c0dc2a61172987dc4eb272919d2dd"} Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.904030 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a" exitCode=0 Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.904146 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a"} Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.904460 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hjgq4" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="registry-server" containerID="cri-o://dff5a6146e4bef9c93ea8727ded634282c2c1268896d174813f93bd716d085d3" gracePeriod=2 Nov 28 15:22:53 crc kubenswrapper[4884]: I1128 15:22:53.930005 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jv7j2" podStartSLOduration=5.3175152390000004 podStartE2EDuration="1m9.929982239s" podCreationTimestamp="2025-11-28 15:21:44 +0000 UTC" firstStartedPulling="2025-11-28 15:21:46.129927966 +0000 UTC m=+145.692711767" lastFinishedPulling="2025-11-28 15:22:50.742394966 +0000 UTC m=+210.305178767" observedRunningTime="2025-11-28 15:22:53.926759065 +0000 UTC m=+213.489542866" watchObservedRunningTime="2025-11-28 15:22:53.929982239 +0000 UTC m=+213.492766040" Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.126493 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.913933 4884 generic.go:334] "Generic (PLEG): container finished" podID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerID="dff5a6146e4bef9c93ea8727ded634282c2c1268896d174813f93bd716d085d3" exitCode=0 Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.914018 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerDied","Data":"dff5a6146e4bef9c93ea8727ded634282c2c1268896d174813f93bd716d085d3"} Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.916543 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerStarted","Data":"1559e028d0dbe09f7198565a9e93906833e3b97337ee42b78f6f43e3f04099eb"} Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.923791 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mvrfd" event={"ID":"1bb36abc-c535-4974-9240-80a698c0eb5d","Type":"ContainerStarted","Data":"7fdf91df25bc65a6090c43a78e333c3601ee9e95b7868a4a24ae36cd080437f9"} Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.926325 4884 generic.go:334] "Generic (PLEG): container finished" podID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerID="057fda1dc83274fb0b7fcd966d5f822c474c0dc2a61172987dc4eb272919d2dd" exitCode=0 Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.926405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerDied","Data":"057fda1dc83274fb0b7fcd966d5f822c474c0dc2a61172987dc4eb272919d2dd"} Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.934026 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8wcfx" podStartSLOduration=3.300719392 podStartE2EDuration="1m13.934003623s" podCreationTimestamp="2025-11-28 15:21:41 +0000 UTC" firstStartedPulling="2025-11-28 15:21:44.024922731 +0000 UTC m=+143.587706532" lastFinishedPulling="2025-11-28 15:22:54.658206962 +0000 UTC m=+214.220990763" observedRunningTime="2025-11-28 15:22:54.932198876 +0000 UTC m=+214.494982697" watchObservedRunningTime="2025-11-28 15:22:54.934003623 +0000 UTC m=+214.496787424" Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.937199 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"76651d38a7f1e7373020d4d61462a753c9abd9d4ff1825e3f902dc828166ea97"} Nov 28 15:22:54 crc kubenswrapper[4884]: I1128 15:22:54.953632 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mvrfd" podStartSLOduration=2.290145168 podStartE2EDuration="1m13.953614627s" podCreationTimestamp="2025-11-28 15:21:41 +0000 UTC" firstStartedPulling="2025-11-28 15:21:42.984516955 +0000 UTC m=+142.547300756" lastFinishedPulling="2025-11-28 15:22:54.647986414 +0000 UTC m=+214.210770215" observedRunningTime="2025-11-28 15:22:54.951650126 +0000 UTC m=+214.514433927" watchObservedRunningTime="2025-11-28 15:22:54.953614627 +0000 UTC m=+214.516398428" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.065487 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.096841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-utilities\") pod \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.096933 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-catalog-content\") pod \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.097022 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85wjj\" (UniqueName: \"kubernetes.io/projected/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-kube-api-access-85wjj\") pod \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\" (UID: \"df63a7fe-4e81-4806-8b52-9f0f12ad7e43\") " Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.098024 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-utilities" (OuterVolumeSpecName: "utilities") pod "df63a7fe-4e81-4806-8b52-9f0f12ad7e43" (UID: "df63a7fe-4e81-4806-8b52-9f0f12ad7e43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.112328 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-kube-api-access-85wjj" (OuterVolumeSpecName: "kube-api-access-85wjj") pod "df63a7fe-4e81-4806-8b52-9f0f12ad7e43" (UID: "df63a7fe-4e81-4806-8b52-9f0f12ad7e43"). InnerVolumeSpecName "kube-api-access-85wjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.144906 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df63a7fe-4e81-4806-8b52-9f0f12ad7e43" (UID: "df63a7fe-4e81-4806-8b52-9f0f12ad7e43"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.198190 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.198245 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.198261 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85wjj\" (UniqueName: \"kubernetes.io/projected/df63a7fe-4e81-4806-8b52-9f0f12ad7e43-kube-api-access-85wjj\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.293284 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.293342 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.942424 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nvnc4"] Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.944301 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nvnc4" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="registry-server" containerID="cri-o://a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811" gracePeriod=2 Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.948355 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerStarted","Data":"91afbe1ed1204860d5bb8a94e87a81f9e945c747a44ed7833fe814fddc7e724f"} Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.950449 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hjgq4" event={"ID":"df63a7fe-4e81-4806-8b52-9f0f12ad7e43","Type":"ContainerDied","Data":"fbc973b88c37bb4546a464121e4dfa4807651873b8dace07f3bf51e2814a22d9"} Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.950498 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hjgq4" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.950505 4884 scope.go:117] "RemoveContainer" containerID="dff5a6146e4bef9c93ea8727ded634282c2c1268896d174813f93bd716d085d3" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.966839 4884 scope.go:117] "RemoveContainer" containerID="ce8a757046e3bb42d8744deadd66f9e55a16ef11b76c4246b53af209104ac137" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.974675 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rv8wt" podStartSLOduration=2.7464080539999998 podStartE2EDuration="1m11.974653527s" podCreationTimestamp="2025-11-28 15:21:44 +0000 UTC" firstStartedPulling="2025-11-28 15:21:46.140888888 +0000 UTC m=+145.703672689" lastFinishedPulling="2025-11-28 15:22:55.369134331 +0000 UTC m=+214.931918162" observedRunningTime="2025-11-28 15:22:55.972814479 +0000 UTC m=+215.535598280" watchObservedRunningTime="2025-11-28 15:22:55.974653527 +0000 UTC m=+215.537437328" Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.994446 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hjgq4"] Nov 28 15:22:55 crc kubenswrapper[4884]: I1128 15:22:55.998338 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hjgq4"] Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.007382 4884 scope.go:117] "RemoveContainer" containerID="246a5391d67581e21b48bd40db8250cc61f92f83fc9c2b715d2aaf808c181213" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.308713 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.355515 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jv7j2" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="registry-server" probeResult="failure" output=< Nov 28 15:22:56 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:22:56 crc kubenswrapper[4884]: > Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.437351 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-utilities\") pod \"c634ff01-2f2a-491b-908c-e1525ee8715a\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.437410 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-catalog-content\") pod \"c634ff01-2f2a-491b-908c-e1525ee8715a\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.437508 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v9g2\" (UniqueName: \"kubernetes.io/projected/c634ff01-2f2a-491b-908c-e1525ee8715a-kube-api-access-8v9g2\") pod \"c634ff01-2f2a-491b-908c-e1525ee8715a\" (UID: \"c634ff01-2f2a-491b-908c-e1525ee8715a\") " Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.438448 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-utilities" (OuterVolumeSpecName: "utilities") pod 
"c634ff01-2f2a-491b-908c-e1525ee8715a" (UID: "c634ff01-2f2a-491b-908c-e1525ee8715a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.453385 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c634ff01-2f2a-491b-908c-e1525ee8715a-kube-api-access-8v9g2" (OuterVolumeSpecName: "kube-api-access-8v9g2") pod "c634ff01-2f2a-491b-908c-e1525ee8715a" (UID: "c634ff01-2f2a-491b-908c-e1525ee8715a"). InnerVolumeSpecName "kube-api-access-8v9g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.461874 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c634ff01-2f2a-491b-908c-e1525ee8715a" (UID: "c634ff01-2f2a-491b-908c-e1525ee8715a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.538665 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v9g2\" (UniqueName: \"kubernetes.io/projected/c634ff01-2f2a-491b-908c-e1525ee8715a-kube-api-access-8v9g2\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.538722 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.538741 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c634ff01-2f2a-491b-908c-e1525ee8715a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.695233 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" path="/var/lib/kubelet/pods/df63a7fe-4e81-4806-8b52-9f0f12ad7e43/volumes" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.963261 4884 generic.go:334] "Generic (PLEG): container finished" podID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerID="a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811" exitCode=0 Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.963806 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nvnc4" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.963981 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerDied","Data":"a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811"} Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.964058 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nvnc4" event={"ID":"c634ff01-2f2a-491b-908c-e1525ee8715a","Type":"ContainerDied","Data":"6d8a674cfcd144f07c403e4fcff718ec036012b65a8ef7388f79feaffad517b3"} Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.964136 4884 scope.go:117] "RemoveContainer" containerID="a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811" Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.990777 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nvnc4"] Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.995452 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nvnc4"] Nov 28 15:22:56 crc kubenswrapper[4884]: I1128 15:22:56.997340 4884 scope.go:117] "RemoveContainer" containerID="aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.017187 4884 scope.go:117] "RemoveContainer" containerID="4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.051347 4884 scope.go:117] "RemoveContainer" containerID="a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811" Nov 28 15:22:57 crc kubenswrapper[4884]: E1128 15:22:57.051772 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811\": container with ID starting with a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811 not found: ID does not exist" containerID="a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.051818 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811"} err="failed to get container status \"a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811\": rpc error: code = NotFound desc = could not find container \"a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811\": container with ID starting with a53202b65371ffea473fec67a11c5b60ad729f0ef4cc31f541e8c47b3c66b811 not found: ID does not exist" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.051849 4884 scope.go:117] "RemoveContainer" containerID="aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9" Nov 28 15:22:57 crc kubenswrapper[4884]: E1128 15:22:57.052300 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9\": container with ID starting with aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9 not found: ID does not exist" containerID="aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.052343 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9"} err="failed to get container status \"aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9\": rpc error: code = NotFound desc = could not find container \"aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9\": container with ID starting with aa69258bd460e240bfa0cdadd93ac8130d6758d96cda79445db9d4d3455e99b9 not found: ID does not exist" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.052395 4884 scope.go:117] "RemoveContainer" containerID="4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae" Nov 28 15:22:57 crc kubenswrapper[4884]: E1128 15:22:57.054424 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae\": container with ID starting with 4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae not found: ID does not exist" containerID="4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae" Nov 28 15:22:57 crc kubenswrapper[4884]: I1128 15:22:57.054472 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae"} err="failed to get container status \"4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae\": rpc error: code = NotFound desc = could not find container \"4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae\": container with ID starting with 4cf6a811a1272c91f131e591e6a4957cabc6f29a424f4b1e26b7526161f057ae not found: ID does not exist" Nov 28 15:22:58 crc kubenswrapper[4884]: I1128 15:22:58.698793 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" path="/var/lib/kubelet/pods/c634ff01-2f2a-491b-908c-e1525ee8715a/volumes" Nov 28 15:23:02 crc kubenswrapper[4884]: I1128 15:23:02.254281 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:23:02 crc kubenswrapper[4884]: I1128 15:23:02.254857 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:23:02 crc kubenswrapper[4884]: I1128 15:23:02.258319 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:23:02 crc kubenswrapper[4884]: I1128 15:23:02.258735 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:23:02 crc kubenswrapper[4884]: I1128 15:23:02.300705 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:23:02 crc kubenswrapper[4884]: I1128 15:23:02.318401 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:23:03 crc kubenswrapper[4884]: I1128 15:23:03.065052 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:23:03 crc kubenswrapper[4884]: I1128 15:23:03.068556 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:23:03 crc 
kubenswrapper[4884]: I1128 15:23:03.936942 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mvrfd"] Nov 28 15:23:04 crc kubenswrapper[4884]: I1128 15:23:04.303898 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4449t"] Nov 28 15:23:04 crc kubenswrapper[4884]: I1128 15:23:04.960508 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:23:04 crc kubenswrapper[4884]: I1128 15:23:04.961947 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:23:05 crc kubenswrapper[4884]: I1128 15:23:05.008631 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mvrfd" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="registry-server" containerID="cri-o://7fdf91df25bc65a6090c43a78e333c3601ee9e95b7868a4a24ae36cd080437f9" gracePeriod=2 Nov 28 15:23:05 crc kubenswrapper[4884]: I1128 15:23:05.032970 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:23:05 crc kubenswrapper[4884]: I1128 15:23:05.342451 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:23:05 crc kubenswrapper[4884]: I1128 15:23:05.412300 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:23:06 crc kubenswrapper[4884]: I1128 15:23:06.014797 4884 generic.go:334] "Generic (PLEG): container finished" podID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerID="7fdf91df25bc65a6090c43a78e333c3601ee9e95b7868a4a24ae36cd080437f9" exitCode=0 Nov 28 15:23:06 crc kubenswrapper[4884]: I1128 15:23:06.016214 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mvrfd" event={"ID":"1bb36abc-c535-4974-9240-80a698c0eb5d","Type":"ContainerDied","Data":"7fdf91df25bc65a6090c43a78e333c3601ee9e95b7868a4a24ae36cd080437f9"} Nov 28 15:23:06 crc kubenswrapper[4884]: I1128 15:23:06.049465 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.444736 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.484654 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-utilities\") pod \"1bb36abc-c535-4974-9240-80a698c0eb5d\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.484721 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsdkb\" (UniqueName: \"kubernetes.io/projected/1bb36abc-c535-4974-9240-80a698c0eb5d-kube-api-access-fsdkb\") pod \"1bb36abc-c535-4974-9240-80a698c0eb5d\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.484767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-catalog-content\") pod \"1bb36abc-c535-4974-9240-80a698c0eb5d\" (UID: \"1bb36abc-c535-4974-9240-80a698c0eb5d\") " Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.486187 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-utilities" (OuterVolumeSpecName: "utilities") pod "1bb36abc-c535-4974-9240-80a698c0eb5d" (UID: "1bb36abc-c535-4974-9240-80a698c0eb5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.491057 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bb36abc-c535-4974-9240-80a698c0eb5d-kube-api-access-fsdkb" (OuterVolumeSpecName: "kube-api-access-fsdkb") pod "1bb36abc-c535-4974-9240-80a698c0eb5d" (UID: "1bb36abc-c535-4974-9240-80a698c0eb5d"). InnerVolumeSpecName "kube-api-access-fsdkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.527704 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1bb36abc-c535-4974-9240-80a698c0eb5d" (UID: "1bb36abc-c535-4974-9240-80a698c0eb5d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.586063 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.586134 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsdkb\" (UniqueName: \"kubernetes.io/projected/1bb36abc-c535-4974-9240-80a698c0eb5d-kube-api-access-fsdkb\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.586145 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1bb36abc-c535-4974-9240-80a698c0eb5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.737806 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jv7j2"] Nov 28 15:23:07 crc kubenswrapper[4884]: I1128 15:23:07.738016 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jv7j2" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="registry-server" containerID="cri-o://ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2" gracePeriod=2 Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.034220 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mvrfd" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.034186 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mvrfd" event={"ID":"1bb36abc-c535-4974-9240-80a698c0eb5d","Type":"ContainerDied","Data":"64c326026160895d837c3f63c75b1e178c20b4f7b6cdb558134bbcaf4a36e4f6"} Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.035125 4884 scope.go:117] "RemoveContainer" containerID="7fdf91df25bc65a6090c43a78e333c3601ee9e95b7868a4a24ae36cd080437f9" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.073600 4884 scope.go:117] "RemoveContainer" containerID="44b021888a135c4cab70589e4e02bfe61fd80706f0416fe9f9412760a63136c3" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.080833 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mvrfd"] Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.083687 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mvrfd"] Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.114082 4884 scope.go:117] "RemoveContainer" containerID="78191450d30dec958f04381afa3f59cd07c5b7db3f7ab08cc2974525999f3b84" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.625016 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.697617 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" path="/var/lib/kubelet/pods/1bb36abc-c535-4974-9240-80a698c0eb5d/volumes" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.700017 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwnwc\" (UniqueName: \"kubernetes.io/projected/6e7cdd36-55ff-41fd-bf5f-393429f8470b-kube-api-access-lwnwc\") pod \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.700123 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-catalog-content\") pod \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.700149 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-utilities\") pod \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\" (UID: \"6e7cdd36-55ff-41fd-bf5f-393429f8470b\") " Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.701044 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-utilities" (OuterVolumeSpecName: "utilities") pod "6e7cdd36-55ff-41fd-bf5f-393429f8470b" (UID: "6e7cdd36-55ff-41fd-bf5f-393429f8470b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.704495 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e7cdd36-55ff-41fd-bf5f-393429f8470b-kube-api-access-lwnwc" (OuterVolumeSpecName: "kube-api-access-lwnwc") pod "6e7cdd36-55ff-41fd-bf5f-393429f8470b" (UID: "6e7cdd36-55ff-41fd-bf5f-393429f8470b"). InnerVolumeSpecName "kube-api-access-lwnwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.802028 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.802079 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwnwc\" (UniqueName: \"kubernetes.io/projected/6e7cdd36-55ff-41fd-bf5f-393429f8470b-kube-api-access-lwnwc\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.844484 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e7cdd36-55ff-41fd-bf5f-393429f8470b" (UID: "6e7cdd36-55ff-41fd-bf5f-393429f8470b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865413 4884 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865687 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865709 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865732 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3df941-8d76-47c9-b43b-98c5ecedea57" containerName="pruner" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865740 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3df941-8d76-47c9-b43b-98c5ecedea57" containerName="pruner" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865748 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865756 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865769 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865776 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865791 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865798 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865806 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865813 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865828 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865836 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865845 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865853 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="extract-content" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865862 4884 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865869 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865882 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865889 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865899 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865907 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865919 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865927 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.865940 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.865948 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="extract-utilities" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866050 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="df63a7fe-4e81-4806-8b52-9f0f12ad7e43" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866061 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866074 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bb36abc-c535-4974-9240-80a698c0eb5d" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866110 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c634ff01-2f2a-491b-908c-e1525ee8715a" containerName="registry-server" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866126 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3df941-8d76-47c9-b43b-98c5ecedea57" containerName="pruner" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866509 4884 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.866526 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.867224 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17" gracePeriod=15 Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.867261 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0" gracePeriod=15 Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.867296 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0" gracePeriod=15 Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.867362 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a" gracePeriod=15 Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.867512 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968" gracePeriod=15 Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869135 4884 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869334 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869355 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869365 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869373 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869388 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869398 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869411 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" 
Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869419 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869428 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869461 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869472 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869481 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 15:23:08 crc kubenswrapper[4884]: E1128 15:23:08.869489 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869497 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869616 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869628 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869640 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869653 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869662 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.869672 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.912291 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.913598 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.913677 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.913833 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.913918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.913947 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.914015 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.914141 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.914222 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:08 crc kubenswrapper[4884]: I1128 15:23:08.914307 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e7cdd36-55ff-41fd-bf5f-393429f8470b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015686 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015747 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015777 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015802 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015837 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015865 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015887 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015917 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.015983 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016019 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016039 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" 
(UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016063 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016156 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016185 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016084 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.016202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.042234 4884 generic.go:334] "Generic (PLEG): container finished" podID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" containerID="ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2" exitCode=0 Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.042298 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerDied","Data":"ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2"} Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.042326 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jv7j2" event={"ID":"6e7cdd36-55ff-41fd-bf5f-393429f8470b","Type":"ContainerDied","Data":"6fd818e9bc5ea8c50b089f135b25061c2aba269fa8bb86018cf868d6996f811f"} Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.042345 4884 scope.go:117] "RemoveContainer" containerID="ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.042450 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jv7j2" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.043603 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.043881 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.044208 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.047816 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.049149 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.049925 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968" exitCode=0 Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.049941 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0" exitCode=0 Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.049947 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0" exitCode=0 Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.049953 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a" exitCode=2 Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.062994 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.063335 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:09 
crc kubenswrapper[4884]: I1128 15:23:09.063630 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.064459 4884 scope.go:117] "RemoveContainer" containerID="00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.138925 4884 scope.go:117] "RemoveContainer" containerID="6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.154413 4884 scope.go:117] "RemoveContainer" containerID="ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2" Nov 28 15:23:09 crc kubenswrapper[4884]: E1128 15:23:09.155006 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2\": container with ID starting with ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2 not found: ID does not exist" containerID="ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.155052 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2"} err="failed to get container status \"ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2\": rpc error: code = NotFound desc = could not find container \"ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2\": container with ID starting with ffac2e3f6a8e96af763a9cbbdfe6c3e5f517970620cde79f0a1edff357e4c0f2 not found: ID does not exist" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.155082 4884 scope.go:117] "RemoveContainer" containerID="00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d" Nov 28 15:23:09 crc kubenswrapper[4884]: E1128 15:23:09.155737 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d\": container with ID starting with 00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d not found: ID does not exist" containerID="00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.155800 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d"} err="failed to get container status \"00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d\": rpc error: code = NotFound desc = could not find container \"00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d\": container with ID starting with 00dffbf02bbef475178a53fae00de2937b666c80742d3a36467a26fee0827d8d not found: ID does not exist" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.155825 4884 scope.go:117] "RemoveContainer" containerID="6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d" Nov 28 15:23:09 crc kubenswrapper[4884]: E1128 15:23:09.156251 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d\": container with ID starting with 6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d not found: ID does not exist" containerID="6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.156319 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d"} err="failed to get container status \"6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d\": rpc error: code = NotFound desc = could not find container \"6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d\": container with ID starting with 6cf86a63b1444a002d6eb3f790516cf20f8a586704dae94a87907d4e1307058d not found: ID does not exist" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.156359 4884 scope.go:117] "RemoveContainer" containerID="6ca6d903b8b2e6a0193f8812fa9f10cb8ba927b54ec78840cf81f08962e911dc" Nov 28 15:23:09 crc kubenswrapper[4884]: I1128 15:23:09.213757 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:09 crc kubenswrapper[4884]: E1128 15:23:09.234286 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.189:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c34f6f111deb8 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:23:09.23347116 +0000 UTC m=+228.796254981,LastTimestamp:2025-11-28 15:23:09.23347116 +0000 UTC m=+228.796254981,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.065182 4884 generic.go:334] "Generic (PLEG): container finished" podID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" containerID="ae37c55d07ab4b462bc57698a859b0abc1a9078bba8b87d04e59b8c1e01d3092" exitCode=0 Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.065313 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f","Type":"ContainerDied","Data":"ae37c55d07ab4b462bc57698a859b0abc1a9078bba8b87d04e59b8c1e01d3092"} Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.066775 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 
15:23:10.067281 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.067758 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.068250 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.070892 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.073713 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90"} Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.073773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"dbade025f6ff130ec376972d5dab5d265d0ea5491e0e0fbff65daa79a8bfc85e"} Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.074434 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.074817 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.075337 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.075893 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 
38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: E1128 15:23:10.374305 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.189:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c34f6f111deb8 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:23:09.23347116 +0000 UTC m=+228.796254981,LastTimestamp:2025-11-28 15:23:09.23347116 +0000 UTC m=+228.796254981,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.695061 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.695540 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.696082 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:10 crc kubenswrapper[4884]: I1128 15:23:10.696620 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.281328 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.283034 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.283723 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.284136 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.284483 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.284735 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.328100 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.328779 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.329357 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.330056 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.330372 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.343917 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.344046 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.344573 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.344648 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.344738 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.344924 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.345318 4884 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.345355 4884 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.345371 4884 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.445944 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kube-api-access\") pod \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.446003 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-var-lock\") pod \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.446048 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kubelet-dir\") pod \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\" (UID: \"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f\") " Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.446340 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" (UID: "4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.446378 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-var-lock" (OuterVolumeSpecName: "var-lock") pod "4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" (UID: "4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.459572 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" (UID: "4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.547386 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.547419 4884 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:11 crc kubenswrapper[4884]: I1128 15:23:11.547429 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.087461 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.087470 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f","Type":"ContainerDied","Data":"3297acc61d18fd6f66405e287bc321163c25a8a7c8d3e81f62873a729803cb67"} Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.087531 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3297acc61d18fd6f66405e287bc321163c25a8a7c8d3e81f62873a729803cb67" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.091916 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.093050 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17" exitCode=0 Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.093125 4884 scope.go:117] "RemoveContainer" containerID="c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.093364 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.102462 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.103269 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.103935 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.104266 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.116016 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.116361 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.116848 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.119285 4884 scope.go:117] "RemoveContainer" containerID="702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.120939 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.135782 4884 scope.go:117] "RemoveContainer" 
containerID="793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.149880 4884 scope.go:117] "RemoveContainer" containerID="d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.161521 4884 scope.go:117] "RemoveContainer" containerID="0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.176420 4884 scope.go:117] "RemoveContainer" containerID="d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.196455 4884 scope.go:117] "RemoveContainer" containerID="c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968" Nov 28 15:23:12 crc kubenswrapper[4884]: E1128 15:23:12.198717 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\": container with ID starting with c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968 not found: ID does not exist" containerID="c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.198755 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968"} err="failed to get container status \"c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\": rpc error: code = NotFound desc = could not find container \"c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968\": container with ID starting with c1d140771e129da2a66ce798211acb3b33b6d425ad664c1163187157574ec968 not found: ID does not exist" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.198796 4884 scope.go:117] "RemoveContainer" containerID="702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0" Nov 28 15:23:12 crc kubenswrapper[4884]: E1128 15:23:12.204229 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\": container with ID starting with 702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0 not found: ID does not exist" containerID="702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.204651 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0"} err="failed to get container status \"702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\": rpc error: code = NotFound desc = could not find container \"702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0\": container with ID starting with 702334ffdc505e20457562697604e389478ee14c0d9148927a53d6979353cfa0 not found: ID does not exist" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.204684 4884 scope.go:117] "RemoveContainer" containerID="793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0" Nov 28 15:23:12 crc kubenswrapper[4884]: E1128 15:23:12.205301 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\": container with ID starting 
with 793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0 not found: ID does not exist" containerID="793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.205356 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0"} err="failed to get container status \"793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\": rpc error: code = NotFound desc = could not find container \"793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0\": container with ID starting with 793e2e8577c3692857379f7dec163574363f48f4ad638bd211e27f10987fabc0 not found: ID does not exist" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.205709 4884 scope.go:117] "RemoveContainer" containerID="d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a" Nov 28 15:23:12 crc kubenswrapper[4884]: E1128 15:23:12.206196 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\": container with ID starting with d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a not found: ID does not exist" containerID="d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.206237 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a"} err="failed to get container status \"d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\": rpc error: code = NotFound desc = could not find container \"d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a\": container with ID starting with d46075db4943147f75f659f49fc5e8bd4d3a159cef134f4fc76fb2ccefe56c7a not found: ID does not exist" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.206264 4884 scope.go:117] "RemoveContainer" containerID="0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17" Nov 28 15:23:12 crc kubenswrapper[4884]: E1128 15:23:12.206613 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\": container with ID starting with 0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17 not found: ID does not exist" containerID="0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.206659 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17"} err="failed to get container status \"0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\": rpc error: code = NotFound desc = could not find container \"0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17\": container with ID starting with 0829a8a8925b84803ed87f9c8c416737d5bc2ea8c5cc080241ac5ae29ad18c17 not found: ID does not exist" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.206686 4884 scope.go:117] "RemoveContainer" containerID="d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e" Nov 28 15:23:12 crc kubenswrapper[4884]: E1128 15:23:12.209198 4884 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\": container with ID starting with d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e not found: ID does not exist" containerID="d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.209236 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e"} err="failed to get container status \"d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\": rpc error: code = NotFound desc = could not find container \"d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e\": container with ID starting with d8a0f5c0ef398e04183e4d4a1603b22937484f7dfdf7f2f08d8fa670a8e8675e not found: ID does not exist" Nov 28 15:23:12 crc kubenswrapper[4884]: I1128 15:23:12.694590 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 28 15:23:15 crc kubenswrapper[4884]: E1128 15:23:15.967261 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:15 crc kubenswrapper[4884]: E1128 15:23:15.968189 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:15 crc kubenswrapper[4884]: E1128 15:23:15.969159 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:15 crc kubenswrapper[4884]: E1128 15:23:15.969934 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:15 crc kubenswrapper[4884]: E1128 15:23:15.970438 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:15 crc kubenswrapper[4884]: I1128 15:23:15.970496 4884 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 28 15:23:15 crc kubenswrapper[4884]: E1128 15:23:15.971070 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="200ms" Nov 28 15:23:16 crc kubenswrapper[4884]: E1128 15:23:16.172479 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="400ms" Nov 28 15:23:16 crc kubenswrapper[4884]: 
E1128 15:23:16.573207 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="800ms" Nov 28 15:23:17 crc kubenswrapper[4884]: E1128 15:23:17.373911 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="1.6s" Nov 28 15:23:18 crc kubenswrapper[4884]: E1128 15:23:18.975198 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.189:6443: connect: connection refused" interval="3.2s" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.688018 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.690323 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.690736 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.691079 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.711834 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.712337 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:19 crc kubenswrapper[4884]: E1128 15:23:19.712918 4884 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:19 crc kubenswrapper[4884]: I1128 15:23:19.713768 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.151643 4884 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="339c9174f76407a192e874715b8010a59711b9deb8c3e1069c0e5854aacb13dd" exitCode=0 Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.151798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"339c9174f76407a192e874715b8010a59711b9deb8c3e1069c0e5854aacb13dd"} Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.151973 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f5b432360af3d3b4e6e9a18d05d734af239fb87ecbe8b81cfa1d368c1e25d4ae"} Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.152573 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.152586 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.152667 4884 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:20 crc kubenswrapper[4884]: E1128 15:23:20.152896 4884 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.153224 4884 status_manager.go:851] "Failed to get status for pod" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:20 crc kubenswrapper[4884]: I1128 15:23:20.153600 4884 status_manager.go:851] "Failed to get status for pod" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" pod="openshift-marketplace/redhat-operators-jv7j2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-jv7j2\": dial tcp 38.102.83.189:6443: connect: connection refused" Nov 28 15:23:20 crc kubenswrapper[4884]: E1128 15:23:20.375529 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.189:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c34f6f111deb8 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 15:23:09.23347116 +0000 UTC m=+228.796254981,LastTimestamp:2025-11-28 15:23:09.23347116 +0000 UTC m=+228.796254981,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 15:23:21 crc kubenswrapper[4884]: I1128 15:23:21.161373 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ebf717858864c5fbf68e345a812b3ed5267a0222868706cbb2bc6cc83eb8f4bf"} Nov 28 15:23:21 crc kubenswrapper[4884]: I1128 15:23:21.161912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"77dbbcc301d9c05bf5a78630c4ea1adedf30a250726a0cd6b59bdcd6dbab35f4"} Nov 28 15:23:21 crc kubenswrapper[4884]: I1128 15:23:21.161925 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"28324243c8d981d864507d145a345c5d6835d79438dc9a37f350974f4d717bda"} Nov 28 15:23:21 crc kubenswrapper[4884]: I1128 15:23:21.161933 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"45539388e4678df65922f560cad5bfdc71b573624998d60c8a4c2d201ba3a6e5"} Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.612912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a96cde308526f88824b8e5b28f053e73f772bad6dc82a799fa55224599cfc754"} Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.613366 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.613241 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.613394 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.615676 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.615733 4884 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb" exitCode=1 Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.615767 4884 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb"} Nov 28 15:23:23 crc kubenswrapper[4884]: I1128 15:23:23.616325 4884 scope.go:117] "RemoveContainer" containerID="0d44da1d28ea0b885a438c102af610201b5f92911e48a40a8a3bdbf9beff35fb" Nov 28 15:23:24 crc kubenswrapper[4884]: I1128 15:23:24.626486 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 15:23:24 crc kubenswrapper[4884]: I1128 15:23:24.626798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"31313d9e57bf37efeca8f7559cc67fbc0e51f355d1fc867a395cebe0b2b99fde"} Nov 28 15:23:24 crc kubenswrapper[4884]: I1128 15:23:24.714360 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:24 crc kubenswrapper[4884]: I1128 15:23:24.714475 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:24 crc kubenswrapper[4884]: I1128 15:23:24.719740 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:26 crc kubenswrapper[4884]: I1128 15:23:26.946223 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:23:26 crc kubenswrapper[4884]: I1128 15:23:26.946465 4884 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 15:23:26 crc kubenswrapper[4884]: I1128 15:23:26.946513 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 15:23:28 crc kubenswrapper[4884]: I1128 15:23:28.528474 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:23:28 crc kubenswrapper[4884]: I1128 15:23:28.622845 4884 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:28 crc kubenswrapper[4884]: I1128 15:23:28.651384 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:28 crc kubenswrapper[4884]: I1128 15:23:28.651438 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:28 crc kubenswrapper[4884]: I1128 15:23:28.658462 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:28 crc kubenswrapper[4884]: 
I1128 15:23:28.660859 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b7e2430b-eade-4d2a-8a07-29bd101b7f31" Nov 28 15:23:29 crc kubenswrapper[4884]: I1128 15:23:29.352192 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" podUID="bfa540e4-eea8-4206-b872-5e42d80e017c" containerName="oauth-openshift" containerID="cri-o://83b3d69f435aceb1a78c29e5ca951cc929b4bb8c10ef6675aa820ccd81afc6e7" gracePeriod=15 Nov 28 15:23:29 crc kubenswrapper[4884]: I1128 15:23:29.661032 4884 generic.go:334] "Generic (PLEG): container finished" podID="bfa540e4-eea8-4206-b872-5e42d80e017c" containerID="83b3d69f435aceb1a78c29e5ca951cc929b4bb8c10ef6675aa820ccd81afc6e7" exitCode=0 Nov 28 15:23:29 crc kubenswrapper[4884]: I1128 15:23:29.661160 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" event={"ID":"bfa540e4-eea8-4206-b872-5e42d80e017c","Type":"ContainerDied","Data":"83b3d69f435aceb1a78c29e5ca951cc929b4bb8c10ef6675aa820ccd81afc6e7"} Nov 28 15:23:29 crc kubenswrapper[4884]: I1128 15:23:29.661341 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:29 crc kubenswrapper[4884]: I1128 15:23:29.661358 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="0df18773-f995-4e08-bcd0-81350c8d83ae" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.254943 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373771 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-error\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373825 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-cliconfig\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373845 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-session\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373867 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rscdf\" (UniqueName: \"kubernetes.io/projected/bfa540e4-eea8-4206-b872-5e42d80e017c-kube-api-access-rscdf\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373915 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-serving-cert\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373934 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-policies\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-provider-selection\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.373985 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-ocp-branding-template\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374025 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-idp-0-file-data\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374068 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-dir\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374085 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-router-certs\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374131 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-service-ca\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374150 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-trusted-ca-bundle\") pod \"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374173 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-login\") pod 
\"bfa540e4-eea8-4206-b872-5e42d80e017c\" (UID: \"bfa540e4-eea8-4206-b872-5e42d80e017c\") " Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.374244 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.375228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.375590 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.375942 4884 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.375962 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.375973 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.376029 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.376008 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.381655 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.381603 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfa540e4-eea8-4206-b872-5e42d80e017c-kube-api-access-rscdf" (OuterVolumeSpecName: "kube-api-access-rscdf") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "kube-api-access-rscdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.383431 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.383894 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.384339 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.384684 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.384999 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.387824 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.396058 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "bfa540e4-eea8-4206-b872-5e42d80e017c" (UID: "bfa540e4-eea8-4206-b872-5e42d80e017c"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477515 4884 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477547 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477563 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477576 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477588 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477600 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477611 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477623 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477634 4884 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477646 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rscdf\" (UniqueName: \"kubernetes.io/projected/bfa540e4-eea8-4206-b872-5e42d80e017c-kube-api-access-rscdf\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.477658 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/bfa540e4-eea8-4206-b872-5e42d80e017c-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.671545 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" event={"ID":"bfa540e4-eea8-4206-b872-5e42d80e017c","Type":"ContainerDied","Data":"ff74b7c8b958bafa023520630001eb735be0089e3c1b01a4c250bfc4ecfe5c1e"} Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.671614 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4449t" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.671654 4884 scope.go:117] "RemoveContainer" containerID="83b3d69f435aceb1a78c29e5ca951cc929b4bb8c10ef6675aa820ccd81afc6e7" Nov 28 15:23:30 crc kubenswrapper[4884]: I1128 15:23:30.723809 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="b7e2430b-eade-4d2a-8a07-29bd101b7f31" Nov 28 15:23:34 crc kubenswrapper[4884]: I1128 15:23:34.846744 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 15:23:35 crc kubenswrapper[4884]: I1128 15:23:35.099049 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 15:23:35 crc kubenswrapper[4884]: I1128 15:23:35.159777 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 28 15:23:35 crc kubenswrapper[4884]: I1128 15:23:35.489390 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 15:23:35 crc kubenswrapper[4884]: I1128 15:23:35.558481 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 15:23:36 crc kubenswrapper[4884]: I1128 15:23:36.258975 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 15:23:36 crc kubenswrapper[4884]: I1128 15:23:36.507603 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 15:23:36 crc kubenswrapper[4884]: I1128 15:23:36.936689 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 15:23:36 crc kubenswrapper[4884]: I1128 15:23:36.947253 4884 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get 
\"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 15:23:36 crc kubenswrapper[4884]: I1128 15:23:36.947346 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 15:23:37 crc kubenswrapper[4884]: I1128 15:23:37.035886 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 15:23:37 crc kubenswrapper[4884]: I1128 15:23:37.531817 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 15:23:37 crc kubenswrapper[4884]: I1128 15:23:37.609055 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 15:23:37 crc kubenswrapper[4884]: I1128 15:23:37.645870 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 15:23:37 crc kubenswrapper[4884]: I1128 15:23:37.737006 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 15:23:38 crc kubenswrapper[4884]: I1128 15:23:38.690047 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 15:23:38 crc kubenswrapper[4884]: I1128 15:23:38.903774 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.040572 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.149533 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.340797 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.351191 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.382082 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.563877 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.769228 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.880347 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 15:23:39 crc kubenswrapper[4884]: I1128 15:23:39.969945 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 15:23:40 crc 
kubenswrapper[4884]: I1128 15:23:40.104433 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.284636 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.310317 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.318795 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.346352 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.383540 4884 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.412205 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.485410 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.490329 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.516832 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.678335 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 15:23:40 crc kubenswrapper[4884]: I1128 15:23:40.891500 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 15:23:41 crc kubenswrapper[4884]: I1128 15:23:41.000760 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 15:23:41 crc kubenswrapper[4884]: I1128 15:23:41.039422 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:23:41 crc kubenswrapper[4884]: I1128 15:23:41.187046 4884 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 15:23:41 crc kubenswrapper[4884]: I1128 15:23:41.783660 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:23:41 crc kubenswrapper[4884]: I1128 15:23:41.808933 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 15:23:41 crc kubenswrapper[4884]: I1128 15:23:41.918582 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.062884 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.240194 4884 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.258805 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.548545 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.610206 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.615010 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.699433 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.715037 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.753126 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 15:23:42 crc kubenswrapper[4884]: I1128 15:23:42.882668 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.088999 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.377137 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.379960 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.406862 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.441577 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.517401 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.561668 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.631064 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.637120 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.644716 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.727540 4884 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.779236 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.888249 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.929718 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.966724 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 15:23:43 crc kubenswrapper[4884]: I1128 15:23:43.985557 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.004271 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.043178 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.093688 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.150450 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.217028 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.224414 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.274067 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.299728 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.350363 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.353138 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.360603 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.405278 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.427739 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 15:23:44 crc 
kubenswrapper[4884]: I1128 15:23:44.457010 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.674145 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.675637 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 15:23:44 crc kubenswrapper[4884]: I1128 15:23:44.801638 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.106694 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.131224 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.183621 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.248672 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.335603 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.350477 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.351585 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.440972 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.534588 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.569462 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.575275 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.702025 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.940543 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 15:23:45 crc kubenswrapper[4884]: I1128 15:23:45.987976 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.008150 4884 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.056330 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.063318 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.078062 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.101863 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.190664 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.191802 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.212851 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.302250 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.341000 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.422977 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.551897 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.571377 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.578938 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.579407 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.588540 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.606637 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.619624 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.701122 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.781919 4884 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"serving-cert" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.800431 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.812992 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.906217 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.919218 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.952454 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.957519 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:23:46 crc kubenswrapper[4884]: I1128 15:23:46.965075 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.027796 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.101702 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.160281 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.341474 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.422806 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.447830 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.462513 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.470559 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.473402 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.516433 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.589724 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.596849 4884 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.604223 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.608242 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.612022 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.665897 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.670148 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 15:23:47 crc kubenswrapper[4884]: I1128 15:23:47.762706 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.001865 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.158076 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.242442 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.249191 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.295550 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.311481 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.361031 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.382384 4884 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.398806 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.447783 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.448066 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.497513 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.561040 4884 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.716502 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.798608 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.882601 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.934482 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 15:23:48 crc kubenswrapper[4884]: I1128 15:23:48.973716 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.110210 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.145916 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.237823 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.248589 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.304600 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.340264 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.372052 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.377196 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.629079 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.634620 4884 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.739169 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.822741 4884 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.830052 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=41.830022033 podStartE2EDuration="41.830022033s" podCreationTimestamp="2025-11-28 15:23:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:23:28.239603138 +0000 UTC m=+247.802386939" watchObservedRunningTime="2025-11-28 15:23:49.830022033 +0000 UTC m=+269.392805874" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.832276 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/redhat-operators-jv7j2","openshift-authentication/oauth-openshift-558db77b4-4449t"] Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.832363 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-7f687b986-777hq"] Nov 28 15:23:49 crc kubenswrapper[4884]: E1128 15:23:49.832726 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfa540e4-eea8-4206-b872-5e42d80e017c" containerName="oauth-openshift" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.832766 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfa540e4-eea8-4206-b872-5e42d80e017c" containerName="oauth-openshift" Nov 28 15:23:49 crc kubenswrapper[4884]: E1128 15:23:49.832808 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" containerName="installer" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.832823 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" containerName="installer" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.832993 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f1f9e01-a0ef-48d8-9f42-dcb87c799e5f" containerName="installer" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.833024 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfa540e4-eea8-4206-b872-5e42d80e017c" containerName="oauth-openshift" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.833693 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.839486 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.839597 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.839499 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.839863 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.839984 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.840150 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.840505 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.840538 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.841211 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.841492 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.841517 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.844260 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.848845 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.851804 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.858915 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.863682 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.870753 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.874145 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=21.874130513 podStartE2EDuration="21.874130513s" 
podCreationTimestamp="2025-11-28 15:23:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:23:49.872388907 +0000 UTC m=+269.435172728" watchObservedRunningTime="2025-11-28 15:23:49.874130513 +0000 UTC m=+269.436914314" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.882259 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924751 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-service-ca\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924803 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkvk7\" (UniqueName: \"kubernetes.io/projected/6ba420f4-673f-44b1-9ad4-526b4497307f-kube-api-access-gkvk7\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924886 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924911 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-audit-policies\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924932 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-session\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924960 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-ocp-branding-template\") 
pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924976 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.924997 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-login\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.925024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-error\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.925047 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-router-certs\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.925066 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.925104 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.925123 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ba420f4-673f-44b1-9ad4-526b4497307f-audit-dir\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:49 crc kubenswrapper[4884]: I1128 15:23:49.974219 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 15:23:50 crc 
kubenswrapper[4884]: I1128 15:23:50.026784 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkvk7\" (UniqueName: \"kubernetes.io/projected/6ba420f4-673f-44b1-9ad4-526b4497307f-kube-api-access-gkvk7\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.026890 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.026939 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-audit-policies\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.026989 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-session\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027081 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027184 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-login\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027329 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-error\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027387 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-router-certs\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027443 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027546 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ba420f4-673f-44b1-9ad4-526b4497307f-audit-dir\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027637 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-service-ca\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.027676 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.033207 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6ba420f4-673f-44b1-9ad4-526b4497307f-audit-dir\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.034786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-audit-policies\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.035162 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" 
(UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.035189 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.035255 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-service-ca\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.037626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.037757 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.037816 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-router-certs\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.039140 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-error\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.039591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.041419 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-login\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.043423 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-system-session\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.044623 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6ba420f4-673f-44b1-9ad4-526b4497307f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.056613 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkvk7\" (UniqueName: \"kubernetes.io/projected/6ba420f4-673f-44b1-9ad4-526b4497307f-kube-api-access-gkvk7\") pod \"oauth-openshift-7f687b986-777hq\" (UID: \"6ba420f4-673f-44b1-9ad4-526b4497307f\") " pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.095202 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.114184 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.159917 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.197368 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.244733 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.412330 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.432711 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7f687b986-777hq"] Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.451197 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.476145 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.548356 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.606352 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.627206 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.696155 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e7cdd36-55ff-41fd-bf5f-393429f8470b" path="/var/lib/kubelet/pods/6e7cdd36-55ff-41fd-bf5f-393429f8470b/volumes" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.697053 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfa540e4-eea8-4206-b872-5e42d80e017c" path="/var/lib/kubelet/pods/bfa540e4-eea8-4206-b872-5e42d80e017c/volumes" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.698403 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.746965 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.796661 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" event={"ID":"6ba420f4-673f-44b1-9ad4-526b4497307f","Type":"ContainerStarted","Data":"736c3d1a01ab525793ea54f5ff04fccfcb6439ed2f9c858d1f53c183ab1b2845"} Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.796725 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" event={"ID":"6ba420f4-673f-44b1-9ad4-526b4497307f","Type":"ContainerStarted","Data":"1d6d90018df9bf86588e3c0ea694748842eadde8589a216f239a031c6ef3eba0"} Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.797047 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.821513 
4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" podStartSLOduration=46.821495046 podStartE2EDuration="46.821495046s" podCreationTimestamp="2025-11-28 15:23:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:23:50.818839186 +0000 UTC m=+270.381623007" watchObservedRunningTime="2025-11-28 15:23:50.821495046 +0000 UTC m=+270.384278847" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.823370 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.911031 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 15:23:50 crc kubenswrapper[4884]: I1128 15:23:50.996680 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.030983 4884 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.031332 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90" gracePeriod=5 Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.032590 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.099871 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.124353 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.160694 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.267733 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.315036 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.320078 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.510308 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.524743 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.580459 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.610410 
4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.652082 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.664817 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.714021 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.803570 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.820976 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.868617 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 15:23:51 crc kubenswrapper[4884]: I1128 15:23:51.870191 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.163590 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.214648 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.269431 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.345874 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.409829 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.445276 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.471425 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.526192 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.885386 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 15:23:52 crc kubenswrapper[4884]: I1128 15:23:52.960462 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.034429 4884 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.063827 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.105956 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.129079 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.310834 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.465512 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.736880 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.880518 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.930789 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 15:23:53 crc kubenswrapper[4884]: I1128 15:23:53.940309 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 15:23:54 crc kubenswrapper[4884]: I1128 15:23:54.132855 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 15:23:54 crc kubenswrapper[4884]: I1128 15:23:54.374467 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 15:23:54 crc kubenswrapper[4884]: I1128 15:23:54.391977 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 15:23:54 crc kubenswrapper[4884]: I1128 15:23:54.411345 4884 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 15:23:54 crc kubenswrapper[4884]: I1128 15:23:54.541711 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 15:23:55 crc kubenswrapper[4884]: I1128 15:23:55.211390 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 15:23:55 crc kubenswrapper[4884]: I1128 15:23:55.248742 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 15:23:55 crc kubenswrapper[4884]: I1128 15:23:55.388241 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 15:23:55 crc kubenswrapper[4884]: I1128 15:23:55.668321 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.393074 4884 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.624319 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.624675 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.699367 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.717178 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.717234 4884 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="9371fc69-bdf3-47b5-93b7-465e115e0550" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.722697 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.722751 4884 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="9371fc69-bdf3-47b5-93b7-465e115e0550" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.729879 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.729993 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730068 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730184 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730611 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod 
"f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730676 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730711 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.730744 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.752701 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.831965 4884 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.832070 4884 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.832146 4884 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.832166 4884 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.832183 4884 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.835476 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.835542 4884 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90" exitCode=137 Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.835593 4884 scope.go:117] "RemoveContainer" containerID="1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.835672 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.854494 4884 scope.go:117] "RemoveContainer" containerID="1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90" Nov 28 15:23:56 crc kubenswrapper[4884]: E1128 15:23:56.854932 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90\": container with ID starting with 1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90 not found: ID does not exist" containerID="1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90" Nov 28 15:23:56 crc kubenswrapper[4884]: I1128 15:23:56.854968 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90"} err="failed to get container status \"1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90\": rpc error: code = NotFound desc = could not find container \"1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90\": container with ID starting with 1563768461a6a0af3fdcc0ec295ae769bf805ba181f88b15ce92338d8ba06d90 not found: ID does not exist" Nov 28 15:23:58 crc kubenswrapper[4884]: I1128 15:23:58.695704 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 28 15:24:11 crc kubenswrapper[4884]: I1128 15:24:11.921992 4884 generic.go:334] "Generic (PLEG): container finished" podID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerID="514e6e98b772c23c77c07f2f359d6edc5c466870647d09c9e9edf5f48fa565c1" exitCode=0 Nov 28 15:24:11 crc kubenswrapper[4884]: I1128 15:24:11.922218 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" event={"ID":"11aebc28-b264-4e1e-bf43-d5644a24b2ca","Type":"ContainerDied","Data":"514e6e98b772c23c77c07f2f359d6edc5c466870647d09c9e9edf5f48fa565c1"} Nov 28 15:24:11 crc kubenswrapper[4884]: I1128 15:24:11.923201 4884 scope.go:117] "RemoveContainer" containerID="514e6e98b772c23c77c07f2f359d6edc5c466870647d09c9e9edf5f48fa565c1" Nov 28 15:24:12 crc kubenswrapper[4884]: I1128 15:24:12.930164 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" event={"ID":"11aebc28-b264-4e1e-bf43-d5644a24b2ca","Type":"ContainerStarted","Data":"107432f3db8b9c5bca1c6b1d98ab5fd4b22f20bb612fc4b257367cfcecc5e2f0"} Nov 28 15:24:12 crc kubenswrapper[4884]: I1128 15:24:12.931102 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:24:12 crc kubenswrapper[4884]: I1128 15:24:12.932455 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.032771 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2k4vg"] Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.034797 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" podUID="9051d787-06db-42b2-846a-231f40dc737c" containerName="controller-manager" 
containerID="cri-o://87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8" gracePeriod=30 Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.128659 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp"] Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.128854 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" podUID="50b0307c-e145-43ae-b97a-207ff99980a5" containerName="route-controller-manager" containerID="cri-o://4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca" gracePeriod=30 Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.399683 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.467145 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5"] Nov 28 15:24:23 crc kubenswrapper[4884]: E1128 15:24:23.467398 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.467418 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 15:24:23 crc kubenswrapper[4884]: E1128 15:24:23.467433 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9051d787-06db-42b2-846a-231f40dc737c" containerName="controller-manager" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.467440 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9051d787-06db-42b2-846a-231f40dc737c" containerName="controller-manager" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.467641 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9051d787-06db-42b2-846a-231f40dc737c" containerName="controller-manager" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.467667 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.468203 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.483443 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5"] Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.495558 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-client-ca\") pod \"9051d787-06db-42b2-846a-231f40dc737c\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.495655 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-config\") pod \"9051d787-06db-42b2-846a-231f40dc737c\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.495745 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-proxy-ca-bundles\") pod \"9051d787-06db-42b2-846a-231f40dc737c\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.495774 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wncj2\" (UniqueName: \"kubernetes.io/projected/9051d787-06db-42b2-846a-231f40dc737c-kube-api-access-wncj2\") pod \"9051d787-06db-42b2-846a-231f40dc737c\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.495878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9051d787-06db-42b2-846a-231f40dc737c-serving-cert\") pod \"9051d787-06db-42b2-846a-231f40dc737c\" (UID: \"9051d787-06db-42b2-846a-231f40dc737c\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.496393 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-client-ca" (OuterVolumeSpecName: "client-ca") pod "9051d787-06db-42b2-846a-231f40dc737c" (UID: "9051d787-06db-42b2-846a-231f40dc737c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.496453 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9051d787-06db-42b2-846a-231f40dc737c" (UID: "9051d787-06db-42b2-846a-231f40dc737c"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.496525 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.496543 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-config" (OuterVolumeSpecName: "config") pod "9051d787-06db-42b2-846a-231f40dc737c" (UID: "9051d787-06db-42b2-846a-231f40dc737c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.504500 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9051d787-06db-42b2-846a-231f40dc737c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9051d787-06db-42b2-846a-231f40dc737c" (UID: "9051d787-06db-42b2-846a-231f40dc737c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.504625 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9051d787-06db-42b2-846a-231f40dc737c-kube-api-access-wncj2" (OuterVolumeSpecName: "kube-api-access-wncj2") pod "9051d787-06db-42b2-846a-231f40dc737c" (UID: "9051d787-06db-42b2-846a-231f40dc737c"). InnerVolumeSpecName "kube-api-access-wncj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.596767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-client-ca\") pod \"50b0307c-e145-43ae-b97a-207ff99980a5\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.596825 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnnpg\" (UniqueName: \"kubernetes.io/projected/50b0307c-e145-43ae-b97a-207ff99980a5-kube-api-access-tnnpg\") pod \"50b0307c-e145-43ae-b97a-207ff99980a5\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.596872 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-config\") pod \"50b0307c-e145-43ae-b97a-207ff99980a5\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.596958 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50b0307c-e145-43ae-b97a-207ff99980a5-serving-cert\") pod \"50b0307c-e145-43ae-b97a-207ff99980a5\" (UID: \"50b0307c-e145-43ae-b97a-207ff99980a5\") " Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597120 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-client-ca\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597166 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7cxq\" (UniqueName: \"kubernetes.io/projected/6f92c9d3-00f7-436f-a369-2ee4979af697-kube-api-access-z7cxq\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597187 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-proxy-ca-bundles\") pod 
\"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597220 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f92c9d3-00f7-436f-a369-2ee4979af697-serving-cert\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597483 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-config\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597828 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597860 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wncj2\" (UniqueName: \"kubernetes.io/projected/9051d787-06db-42b2-846a-231f40dc737c-kube-api-access-wncj2\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597873 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9051d787-06db-42b2-846a-231f40dc737c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597881 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597889 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9051d787-06db-42b2-846a-231f40dc737c-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.597978 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-config" (OuterVolumeSpecName: "config") pod "50b0307c-e145-43ae-b97a-207ff99980a5" (UID: "50b0307c-e145-43ae-b97a-207ff99980a5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.598186 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-client-ca" (OuterVolumeSpecName: "client-ca") pod "50b0307c-e145-43ae-b97a-207ff99980a5" (UID: "50b0307c-e145-43ae-b97a-207ff99980a5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.601335 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50b0307c-e145-43ae-b97a-207ff99980a5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "50b0307c-e145-43ae-b97a-207ff99980a5" (UID: "50b0307c-e145-43ae-b97a-207ff99980a5"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.601377 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50b0307c-e145-43ae-b97a-207ff99980a5-kube-api-access-tnnpg" (OuterVolumeSpecName: "kube-api-access-tnnpg") pod "50b0307c-e145-43ae-b97a-207ff99980a5" (UID: "50b0307c-e145-43ae-b97a-207ff99980a5"). InnerVolumeSpecName "kube-api-access-tnnpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.698475 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-proxy-ca-bundles\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.698945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7cxq\" (UniqueName: \"kubernetes.io/projected/6f92c9d3-00f7-436f-a369-2ee4979af697-kube-api-access-z7cxq\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.699659 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f92c9d3-00f7-436f-a369-2ee4979af697-serving-cert\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.700161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-config\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.700452 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-client-ca\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.700602 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.700697 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnnpg\" (UniqueName: \"kubernetes.io/projected/50b0307c-e145-43ae-b97a-207ff99980a5-kube-api-access-tnnpg\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.700786 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50b0307c-e145-43ae-b97a-207ff99980a5-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.700850 4884 reconciler_common.go:293] "Volume detached for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50b0307c-e145-43ae-b97a-207ff99980a5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.701565 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-client-ca\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.701985 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-config\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.702002 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-proxy-ca-bundles\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.708452 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f92c9d3-00f7-436f-a369-2ee4979af697-serving-cert\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.720249 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7cxq\" (UniqueName: \"kubernetes.io/projected/6f92c9d3-00f7-436f-a369-2ee4979af697-kube-api-access-z7cxq\") pod \"controller-manager-5bcc4c77b6-qtzh5\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:23 crc kubenswrapper[4884]: I1128 15:24:23.809682 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.009118 4884 generic.go:334] "Generic (PLEG): container finished" podID="50b0307c-e145-43ae-b97a-207ff99980a5" containerID="4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca" exitCode=0 Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.009209 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.011113 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" event={"ID":"50b0307c-e145-43ae-b97a-207ff99980a5","Type":"ContainerDied","Data":"4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca"} Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.011146 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp" event={"ID":"50b0307c-e145-43ae-b97a-207ff99980a5","Type":"ContainerDied","Data":"9f6fb0155df5d61d7e403f0564d4b9bc73c32c725d7dbf8616e0a5f5f1b027dc"} Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.011162 4884 scope.go:117] "RemoveContainer" containerID="4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.013453 4884 generic.go:334] "Generic (PLEG): container finished" podID="9051d787-06db-42b2-846a-231f40dc737c" containerID="87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8" exitCode=0 Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.013490 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" event={"ID":"9051d787-06db-42b2-846a-231f40dc737c","Type":"ContainerDied","Data":"87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8"} Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.013519 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" event={"ID":"9051d787-06db-42b2-846a-231f40dc737c","Type":"ContainerDied","Data":"e199d95b5850977db58020875424756736d9c7b7c923d2726f1b10ad0f2790a3"} Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.013586 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-2k4vg" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.029415 4884 scope.go:117] "RemoveContainer" containerID="4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca" Nov 28 15:24:24 crc kubenswrapper[4884]: E1128 15:24:24.034879 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca\": container with ID starting with 4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca not found: ID does not exist" containerID="4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.035076 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca"} err="failed to get container status \"4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca\": rpc error: code = NotFound desc = could not find container \"4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca\": container with ID starting with 4efb5accc3257111285ae2f8df9ae1c57de610e3956fa5553d443162a6426eca not found: ID does not exist" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.035156 4884 scope.go:117] "RemoveContainer" containerID="87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.045982 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5"] Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.068683 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2k4vg"] Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.072335 4884 scope.go:117] "RemoveContainer" containerID="87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8" Nov 28 15:24:24 crc kubenswrapper[4884]: E1128 15:24:24.073899 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8\": container with ID starting with 87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8 not found: ID does not exist" containerID="87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.073959 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8"} err="failed to get container status \"87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8\": rpc error: code = NotFound desc = could not find container \"87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8\": container with ID starting with 87a881d8cd29b159f096d5d6fae0c36b883489578ab1de32a80d8d8d034d6fc8 not found: ID does not exist" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.085692 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-2k4vg"] Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.092658 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp"] Nov 28 15:24:24 crc 
kubenswrapper[4884]: I1128 15:24:24.097036 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vs6bp"] Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.701815 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50b0307c-e145-43ae-b97a-207ff99980a5" path="/var/lib/kubelet/pods/50b0307c-e145-43ae-b97a-207ff99980a5/volumes" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.702884 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9051d787-06db-42b2-846a-231f40dc737c" path="/var/lib/kubelet/pods/9051d787-06db-42b2-846a-231f40dc737c/volumes" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.771390 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd"] Nov 28 15:24:24 crc kubenswrapper[4884]: E1128 15:24:24.771573 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50b0307c-e145-43ae-b97a-207ff99980a5" containerName="route-controller-manager" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.771585 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="50b0307c-e145-43ae-b97a-207ff99980a5" containerName="route-controller-manager" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.771693 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="50b0307c-e145-43ae-b97a-207ff99980a5" containerName="route-controller-manager" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.772009 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.774582 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.776952 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.777081 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.777347 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.777570 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.777588 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.781859 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd"] Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.816284 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb2pj\" (UniqueName: \"kubernetes.io/projected/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-kube-api-access-rb2pj\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 
15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.816394 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-serving-cert\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.817270 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-config\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.817417 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-client-ca\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.918951 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb2pj\" (UniqueName: \"kubernetes.io/projected/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-kube-api-access-rb2pj\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.919037 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-serving-cert\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.919066 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-config\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.919130 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-client-ca\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.920279 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-client-ca\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 
15:24:24.920814 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-config\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.926871 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-serving-cert\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:24 crc kubenswrapper[4884]: I1128 15:24:24.940654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb2pj\" (UniqueName: \"kubernetes.io/projected/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-kube-api-access-rb2pj\") pod \"route-controller-manager-6775b5b864-bj5zd\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.021316 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" event={"ID":"6f92c9d3-00f7-436f-a369-2ee4979af697","Type":"ContainerStarted","Data":"35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c"} Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.021653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" event={"ID":"6f92c9d3-00f7-436f-a369-2ee4979af697","Type":"ContainerStarted","Data":"b661c4de79ede4e97a0ce4a7feab163f9bdf503d10ee918dccb36d3fcde97f8e"} Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.022315 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.026965 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.044381 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" podStartSLOduration=2.044351433 podStartE2EDuration="2.044351433s" podCreationTimestamp="2025-11-28 15:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:24:25.038625332 +0000 UTC m=+304.601409143" watchObservedRunningTime="2025-11-28 15:24:25.044351433 +0000 UTC m=+304.607135274" Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.095210 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:25 crc kubenswrapper[4884]: I1128 15:24:25.276647 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd"] Nov 28 15:24:26 crc kubenswrapper[4884]: I1128 15:24:26.027552 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" event={"ID":"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6","Type":"ContainerStarted","Data":"098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3"} Nov 28 15:24:26 crc kubenswrapper[4884]: I1128 15:24:26.027927 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" event={"ID":"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6","Type":"ContainerStarted","Data":"f8e7e89624055d532260887924e953242057a8fc24b0f7791a2380f5b0b77fde"} Nov 28 15:24:27 crc kubenswrapper[4884]: I1128 15:24:27.034351 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:27 crc kubenswrapper[4884]: I1128 15:24:27.042282 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:27 crc kubenswrapper[4884]: I1128 15:24:27.062323 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" podStartSLOduration=4.06230182 podStartE2EDuration="4.06230182s" podCreationTimestamp="2025-11-28 15:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:24:26.041887726 +0000 UTC m=+305.604671527" watchObservedRunningTime="2025-11-28 15:24:27.06230182 +0000 UTC m=+306.625085631" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.024604 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd"] Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.026183 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" podUID="c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" containerName="route-controller-manager" containerID="cri-o://098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3" gracePeriod=30 Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.473599 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.563558 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-client-ca\") pod \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.563605 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb2pj\" (UniqueName: \"kubernetes.io/projected/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-kube-api-access-rb2pj\") pod \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.563628 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-serving-cert\") pod \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.563678 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-config\") pod \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\" (UID: \"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6\") " Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.565275 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-client-ca" (OuterVolumeSpecName: "client-ca") pod "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" (UID: "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.567167 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-config" (OuterVolumeSpecName: "config") pod "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" (UID: "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.570798 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-kube-api-access-rb2pj" (OuterVolumeSpecName: "kube-api-access-rb2pj") pod "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" (UID: "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6"). InnerVolumeSpecName "kube-api-access-rb2pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.576451 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" (UID: "c63c5640-56af-4bf1-b5bd-971f3ecd4fb6"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.665976 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.666064 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb2pj\" (UniqueName: \"kubernetes.io/projected/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-kube-api-access-rb2pj\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.666131 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:43 crc kubenswrapper[4884]: I1128 15:24:43.666160 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.134392 4884 generic.go:334] "Generic (PLEG): container finished" podID="c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" containerID="098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3" exitCode=0 Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.134460 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" event={"ID":"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6","Type":"ContainerDied","Data":"098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3"} Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.134495 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" event={"ID":"c63c5640-56af-4bf1-b5bd-971f3ecd4fb6","Type":"ContainerDied","Data":"f8e7e89624055d532260887924e953242057a8fc24b0f7791a2380f5b0b77fde"} Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.134522 4884 scope.go:117] "RemoveContainer" containerID="098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.134671 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.160622 4884 scope.go:117] "RemoveContainer" containerID="098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3" Nov 28 15:24:44 crc kubenswrapper[4884]: E1128 15:24:44.161600 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3\": container with ID starting with 098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3 not found: ID does not exist" containerID="098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.161674 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3"} err="failed to get container status \"098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3\": rpc error: code = NotFound desc = could not find container \"098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3\": container with ID starting with 098cee0b6516b0c497ea94d02ed11776caf7186eba89b7c9412bab6acbf5e3a3 not found: ID does not exist" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.186482 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd"] Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.197359 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6775b5b864-bj5zd"] Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.699914 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" path="/var/lib/kubelet/pods/c63c5640-56af-4bf1-b5bd-971f3ecd4fb6/volumes" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.787858 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g"] Nov 28 15:24:44 crc kubenswrapper[4884]: E1128 15:24:44.788818 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" containerName="route-controller-manager" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.788844 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" containerName="route-controller-manager" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.789009 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c63c5640-56af-4bf1-b5bd-971f3ecd4fb6" containerName="route-controller-manager" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.789682 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.793380 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.793786 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.796363 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.796428 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.796446 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.796506 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.807690 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g"] Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.881683 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t4hq\" (UniqueName: \"kubernetes.io/projected/bfefa5cb-f325-45ae-99fa-9cee32566e96-kube-api-access-6t4hq\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.881791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfefa5cb-f325-45ae-99fa-9cee32566e96-config\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.881852 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bfefa5cb-f325-45ae-99fa-9cee32566e96-client-ca\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.881915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bfefa5cb-f325-45ae-99fa-9cee32566e96-serving-cert\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.983665 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t4hq\" (UniqueName: \"kubernetes.io/projected/bfefa5cb-f325-45ae-99fa-9cee32566e96-kube-api-access-6t4hq\") pod 
\"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.983760 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfefa5cb-f325-45ae-99fa-9cee32566e96-config\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.983800 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bfefa5cb-f325-45ae-99fa-9cee32566e96-client-ca\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.983844 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bfefa5cb-f325-45ae-99fa-9cee32566e96-serving-cert\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.987439 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bfefa5cb-f325-45ae-99fa-9cee32566e96-client-ca\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.988695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfefa5cb-f325-45ae-99fa-9cee32566e96-config\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:44 crc kubenswrapper[4884]: I1128 15:24:44.998426 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bfefa5cb-f325-45ae-99fa-9cee32566e96-serving-cert\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:45 crc kubenswrapper[4884]: I1128 15:24:45.008367 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t4hq\" (UniqueName: \"kubernetes.io/projected/bfefa5cb-f325-45ae-99fa-9cee32566e96-kube-api-access-6t4hq\") pod \"route-controller-manager-5d5ff78b58-5xn7g\" (UID: \"bfefa5cb-f325-45ae-99fa-9cee32566e96\") " pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:45 crc kubenswrapper[4884]: I1128 15:24:45.116995 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:45 crc kubenswrapper[4884]: I1128 15:24:45.524226 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g"] Nov 28 15:24:46 crc kubenswrapper[4884]: I1128 15:24:46.155884 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" event={"ID":"bfefa5cb-f325-45ae-99fa-9cee32566e96","Type":"ContainerStarted","Data":"b72d0b0ff5d9c3ecca7a94386ced248a2df030cc06d98c93f0599dfd774c8bd2"} Nov 28 15:24:46 crc kubenswrapper[4884]: I1128 15:24:46.156315 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:46 crc kubenswrapper[4884]: I1128 15:24:46.156405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" event={"ID":"bfefa5cb-f325-45ae-99fa-9cee32566e96","Type":"ContainerStarted","Data":"c74cd53e59512a264c8213f4f7c5d648abc8915dafce9164d651ef1c4f844062"} Nov 28 15:24:46 crc kubenswrapper[4884]: I1128 15:24:46.162054 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" Nov 28 15:24:46 crc kubenswrapper[4884]: I1128 15:24:46.180105 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5d5ff78b58-5xn7g" podStartSLOduration=3.180071376 podStartE2EDuration="3.180071376s" podCreationTimestamp="2025-11-28 15:24:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:24:46.176958834 +0000 UTC m=+325.739742635" watchObservedRunningTime="2025-11-28 15:24:46.180071376 +0000 UTC m=+325.742855187" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.035867 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-52w4z"] Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.037236 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.050567 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-52w4z"] Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199217 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bxq8\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-kube-api-access-8bxq8\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199307 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199358 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-bound-sa-token\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199381 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/56fc05aa-a14a-46d8-b0f1-d2230f46db87-registry-certificates\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199417 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/56fc05aa-a14a-46d8-b0f1-d2230f46db87-ca-trust-extracted\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199433 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/56fc05aa-a14a-46d8-b0f1-d2230f46db87-trusted-ca\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199614 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/56fc05aa-a14a-46d8-b0f1-d2230f46db87-installation-pull-secrets\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.199648 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-registry-tls\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.230480 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301275 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/56fc05aa-a14a-46d8-b0f1-d2230f46db87-ca-trust-extracted\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/56fc05aa-a14a-46d8-b0f1-d2230f46db87-trusted-ca\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301335 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/56fc05aa-a14a-46d8-b0f1-d2230f46db87-installation-pull-secrets\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301366 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-registry-tls\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bxq8\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-kube-api-access-8bxq8\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301442 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-bound-sa-token\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301469 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/56fc05aa-a14a-46d8-b0f1-d2230f46db87-registry-certificates\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.301753 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/56fc05aa-a14a-46d8-b0f1-d2230f46db87-ca-trust-extracted\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.302757 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/56fc05aa-a14a-46d8-b0f1-d2230f46db87-trusted-ca\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.303630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/56fc05aa-a14a-46d8-b0f1-d2230f46db87-registry-certificates\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.307657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/56fc05aa-a14a-46d8-b0f1-d2230f46db87-installation-pull-secrets\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.314548 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-registry-tls\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.326544 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-bound-sa-token\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.327238 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bxq8\" (UniqueName: \"kubernetes.io/projected/56fc05aa-a14a-46d8-b0f1-d2230f46db87-kube-api-access-8bxq8\") pod \"image-registry-66df7c8f76-52w4z\" (UID: \"56fc05aa-a14a-46d8-b0f1-d2230f46db87\") " pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.356236 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:09 crc kubenswrapper[4884]: I1128 15:25:09.749871 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-52w4z"] Nov 28 15:25:09 crc kubenswrapper[4884]: W1128 15:25:09.762925 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56fc05aa_a14a_46d8_b0f1_d2230f46db87.slice/crio-2b131f764432c136c4d80e6099a433ca02382d732b59b494b5b1ae821d7ba5ac WatchSource:0}: Error finding container 2b131f764432c136c4d80e6099a433ca02382d732b59b494b5b1ae821d7ba5ac: Status 404 returned error can't find the container with id 2b131f764432c136c4d80e6099a433ca02382d732b59b494b5b1ae821d7ba5ac Nov 28 15:25:10 crc kubenswrapper[4884]: I1128 15:25:10.303179 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" event={"ID":"56fc05aa-a14a-46d8-b0f1-d2230f46db87","Type":"ContainerStarted","Data":"0623270dccfc0970fb5618623cb05c2fc3daf31eba77e410dae6f9467d955826"} Nov 28 15:25:10 crc kubenswrapper[4884]: I1128 15:25:10.304361 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:10 crc kubenswrapper[4884]: I1128 15:25:10.304476 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" event={"ID":"56fc05aa-a14a-46d8-b0f1-d2230f46db87","Type":"ContainerStarted","Data":"2b131f764432c136c4d80e6099a433ca02382d732b59b494b5b1ae821d7ba5ac"} Nov 28 15:25:10 crc kubenswrapper[4884]: I1128 15:25:10.325014 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" podStartSLOduration=1.324988373 podStartE2EDuration="1.324988373s" podCreationTimestamp="2025-11-28 15:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:10.323911414 +0000 UTC m=+349.886695215" watchObservedRunningTime="2025-11-28 15:25:10.324988373 +0000 UTC m=+349.887772214" Nov 28 15:25:21 crc kubenswrapper[4884]: I1128 15:25:21.243502 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:25:21 crc kubenswrapper[4884]: I1128 15:25:21.244325 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.030138 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5"] Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.030787 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" podUID="6f92c9d3-00f7-436f-a369-2ee4979af697" containerName="controller-manager" 
containerID="cri-o://35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c" gracePeriod=30 Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.383032 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.386476 4884 generic.go:334] "Generic (PLEG): container finished" podID="6f92c9d3-00f7-436f-a369-2ee4979af697" containerID="35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c" exitCode=0 Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.386509 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" event={"ID":"6f92c9d3-00f7-436f-a369-2ee4979af697","Type":"ContainerDied","Data":"35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c"} Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.386535 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" event={"ID":"6f92c9d3-00f7-436f-a369-2ee4979af697","Type":"ContainerDied","Data":"b661c4de79ede4e97a0ce4a7feab163f9bdf503d10ee918dccb36d3fcde97f8e"} Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.386554 4884 scope.go:117] "RemoveContainer" containerID="35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.386596 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.412471 4884 scope.go:117] "RemoveContainer" containerID="35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c" Nov 28 15:25:23 crc kubenswrapper[4884]: E1128 15:25:23.412884 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c\": container with ID starting with 35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c not found: ID does not exist" containerID="35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.412926 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c"} err="failed to get container status \"35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c\": rpc error: code = NotFound desc = could not find container \"35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c\": container with ID starting with 35e9aa7d02d121d179845501d9c8449d1deedc84e32bcae59fc64a292065d06c not found: ID does not exist" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.516948 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-proxy-ca-bundles\") pod \"6f92c9d3-00f7-436f-a369-2ee4979af697\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.517252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-config\") pod \"6f92c9d3-00f7-436f-a369-2ee4979af697\" (UID: 
\"6f92c9d3-00f7-436f-a369-2ee4979af697\") " Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.517336 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f92c9d3-00f7-436f-a369-2ee4979af697-serving-cert\") pod \"6f92c9d3-00f7-436f-a369-2ee4979af697\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.517361 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7cxq\" (UniqueName: \"kubernetes.io/projected/6f92c9d3-00f7-436f-a369-2ee4979af697-kube-api-access-z7cxq\") pod \"6f92c9d3-00f7-436f-a369-2ee4979af697\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.517428 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-client-ca\") pod \"6f92c9d3-00f7-436f-a369-2ee4979af697\" (UID: \"6f92c9d3-00f7-436f-a369-2ee4979af697\") " Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.518948 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-client-ca" (OuterVolumeSpecName: "client-ca") pod "6f92c9d3-00f7-436f-a369-2ee4979af697" (UID: "6f92c9d3-00f7-436f-a369-2ee4979af697"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.519959 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-config" (OuterVolumeSpecName: "config") pod "6f92c9d3-00f7-436f-a369-2ee4979af697" (UID: "6f92c9d3-00f7-436f-a369-2ee4979af697"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.526543 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6f92c9d3-00f7-436f-a369-2ee4979af697" (UID: "6f92c9d3-00f7-436f-a369-2ee4979af697"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.527038 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f92c9d3-00f7-436f-a369-2ee4979af697-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6f92c9d3-00f7-436f-a369-2ee4979af697" (UID: "6f92c9d3-00f7-436f-a369-2ee4979af697"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.532616 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f92c9d3-00f7-436f-a369-2ee4979af697-kube-api-access-z7cxq" (OuterVolumeSpecName: "kube-api-access-z7cxq") pod "6f92c9d3-00f7-436f-a369-2ee4979af697" (UID: "6f92c9d3-00f7-436f-a369-2ee4979af697"). InnerVolumeSpecName "kube-api-access-z7cxq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.619874 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6f92c9d3-00f7-436f-a369-2ee4979af697-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.619913 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7cxq\" (UniqueName: \"kubernetes.io/projected/6f92c9d3-00f7-436f-a369-2ee4979af697-kube-api-access-z7cxq\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.619927 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.619939 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.619951 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f92c9d3-00f7-436f-a369-2ee4979af697-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.732159 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5"] Nov 28 15:25:23 crc kubenswrapper[4884]: I1128 15:25:23.735975 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5bcc4c77b6-qtzh5"] Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.697078 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f92c9d3-00f7-436f-a369-2ee4979af697" path="/var/lib/kubelet/pods/6f92c9d3-00f7-436f-a369-2ee4979af697/volumes" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.813838 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv"] Nov 28 15:25:24 crc kubenswrapper[4884]: E1128 15:25:24.814115 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f92c9d3-00f7-436f-a369-2ee4979af697" containerName="controller-manager" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.814132 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f92c9d3-00f7-436f-a369-2ee4979af697" containerName="controller-manager" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.814219 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f92c9d3-00f7-436f-a369-2ee4979af697" containerName="controller-manager" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.814576 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.818079 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.818812 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.820369 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.820719 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.821803 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.822184 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.828578 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv"] Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.843242 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.937571 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-config\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.937943 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkxwv\" (UniqueName: \"kubernetes.io/projected/d67a2ec3-0f9e-4833-ba45-e4f511992398-kube-api-access-tkxwv\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.938065 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d67a2ec3-0f9e-4833-ba45-e4f511992398-serving-cert\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.938201 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-proxy-ca-bundles\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:24 crc kubenswrapper[4884]: I1128 15:25:24.938337 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-client-ca\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.039202 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-client-ca\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.039271 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-config\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.039293 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkxwv\" (UniqueName: \"kubernetes.io/projected/d67a2ec3-0f9e-4833-ba45-e4f511992398-kube-api-access-tkxwv\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.039313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d67a2ec3-0f9e-4833-ba45-e4f511992398-serving-cert\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.039340 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-proxy-ca-bundles\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.040559 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-client-ca\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.040626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-proxy-ca-bundles\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.041981 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d67a2ec3-0f9e-4833-ba45-e4f511992398-config\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " 
pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.045004 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d67a2ec3-0f9e-4833-ba45-e4f511992398-serving-cert\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.067233 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkxwv\" (UniqueName: \"kubernetes.io/projected/d67a2ec3-0f9e-4833-ba45-e4f511992398-kube-api-access-tkxwv\") pod \"controller-manager-5c7d6bbd5d-q8npv\" (UID: \"d67a2ec3-0f9e-4833-ba45-e4f511992398\") " pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.138945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:25 crc kubenswrapper[4884]: I1128 15:25:25.536897 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv"] Nov 28 15:25:26 crc kubenswrapper[4884]: I1128 15:25:26.405809 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" event={"ID":"d67a2ec3-0f9e-4833-ba45-e4f511992398","Type":"ContainerStarted","Data":"489f15db972a2b98a91fbcf787655c1c4689c3d4c5ec9e991deb248accccdee1"} Nov 28 15:25:26 crc kubenswrapper[4884]: I1128 15:25:26.406123 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" event={"ID":"d67a2ec3-0f9e-4833-ba45-e4f511992398","Type":"ContainerStarted","Data":"235b96ea688862db5987e9b696ae10cf5981f40fa22d22bf077187642a141322"} Nov 28 15:25:26 crc kubenswrapper[4884]: I1128 15:25:26.406143 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:26 crc kubenswrapper[4884]: I1128 15:25:26.411081 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" Nov 28 15:25:26 crc kubenswrapper[4884]: I1128 15:25:26.423300 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5c7d6bbd5d-q8npv" podStartSLOduration=3.423279572 podStartE2EDuration="3.423279572s" podCreationTimestamp="2025-11-28 15:25:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:26.422430069 +0000 UTC m=+365.985213890" watchObservedRunningTime="2025-11-28 15:25:26.423279572 +0000 UTC m=+365.986063373" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.095983 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8wcfx"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.097485 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8wcfx" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="registry-server" containerID="cri-o://1559e028d0dbe09f7198565a9e93906833e3b97337ee42b78f6f43e3f04099eb" gracePeriod=30 Nov 28 15:25:29 
crc kubenswrapper[4884]: I1128 15:25:29.106274 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b6kf2"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.118573 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9frzk"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.118885 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator" containerID="cri-o://107432f3db8b9c5bca1c6b1d98ab5fd4b22f20bb612fc4b257367cfcecc5e2f0" gracePeriod=30 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.136787 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bgs"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.137075 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-22bgs" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="registry-server" containerID="cri-o://339d1f2c0b3f44e245ca91e8e46021d0aa27c6381f3f031f77603d36dfdc1378" gracePeriod=30 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.157484 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-54xz2"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.158626 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.173600 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-54xz2"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.175984 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rv8wt"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.177670 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rv8wt" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="registry-server" containerID="cri-o://91afbe1ed1204860d5bb8a94e87a81f9e945c747a44ed7833fe814fddc7e724f" gracePeriod=30 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.311060 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c624c933-d370-42a6-ae60-1bc50e004476-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.311148 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c624c933-d370-42a6-ae60-1bc50e004476-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.311178 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtc8q\" (UniqueName: 
\"kubernetes.io/projected/c624c933-d370-42a6-ae60-1bc50e004476-kube-api-access-wtc8q\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.363810 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-52w4z" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.412163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c624c933-d370-42a6-ae60-1bc50e004476-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.412232 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtc8q\" (UniqueName: \"kubernetes.io/projected/c624c933-d370-42a6-ae60-1bc50e004476-kube-api-access-wtc8q\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.412291 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c624c933-d370-42a6-ae60-1bc50e004476-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.413740 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c624c933-d370-42a6-ae60-1bc50e004476-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.417507 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ls7t4"] Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.420872 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c624c933-d370-42a6-ae60-1bc50e004476-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.449587 4884 generic.go:334] "Generic (PLEG): container finished" podID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerID="91afbe1ed1204860d5bb8a94e87a81f9e945c747a44ed7833fe814fddc7e724f" exitCode=0 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.449663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerDied","Data":"91afbe1ed1204860d5bb8a94e87a81f9e945c747a44ed7833fe814fddc7e724f"} Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.451455 4884 generic.go:334] "Generic (PLEG): container finished" podID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" 
containerID="107432f3db8b9c5bca1c6b1d98ab5fd4b22f20bb612fc4b257367cfcecc5e2f0" exitCode=0 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.451526 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" event={"ID":"11aebc28-b264-4e1e-bf43-d5644a24b2ca","Type":"ContainerDied","Data":"107432f3db8b9c5bca1c6b1d98ab5fd4b22f20bb612fc4b257367cfcecc5e2f0"} Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.451626 4884 scope.go:117] "RemoveContainer" containerID="514e6e98b772c23c77c07f2f359d6edc5c466870647d09c9e9edf5f48fa565c1" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.458741 4884 generic.go:334] "Generic (PLEG): container finished" podID="52b0dffb-d746-416e-9494-6562cb444a5b" containerID="1559e028d0dbe09f7198565a9e93906833e3b97337ee42b78f6f43e3f04099eb" exitCode=0 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.458778 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerDied","Data":"1559e028d0dbe09f7198565a9e93906833e3b97337ee42b78f6f43e3f04099eb"} Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.463984 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtc8q\" (UniqueName: \"kubernetes.io/projected/c624c933-d370-42a6-ae60-1bc50e004476-kube-api-access-wtc8q\") pod \"marketplace-operator-79b997595-54xz2\" (UID: \"c624c933-d370-42a6-ae60-1bc50e004476\") " pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.466607 4884 generic.go:334] "Generic (PLEG): container finished" podID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerID="339d1f2c0b3f44e245ca91e8e46021d0aa27c6381f3f031f77603d36dfdc1378" exitCode=0 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.466801 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b6kf2" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="registry-server" containerID="cri-o://bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027" gracePeriod=30 Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.467074 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bgs" event={"ID":"fc6aa808-4684-4ba5-93bd-cb8ba9edca63","Type":"ContainerDied","Data":"339d1f2c0b3f44e245ca91e8e46021d0aa27c6381f3f031f77603d36dfdc1378"} Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.474116 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.588664 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.716503 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-trusted-ca\") pod \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.716539 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfxkr\" (UniqueName: \"kubernetes.io/projected/11aebc28-b264-4e1e-bf43-d5644a24b2ca-kube-api-access-cfxkr\") pod \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.716609 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-operator-metrics\") pod \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\" (UID: \"11aebc28-b264-4e1e-bf43-d5644a24b2ca\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.717543 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "11aebc28-b264-4e1e-bf43-d5644a24b2ca" (UID: "11aebc28-b264-4e1e-bf43-d5644a24b2ca"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.726130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11aebc28-b264-4e1e-bf43-d5644a24b2ca-kube-api-access-cfxkr" (OuterVolumeSpecName: "kube-api-access-cfxkr") pod "11aebc28-b264-4e1e-bf43-d5644a24b2ca" (UID: "11aebc28-b264-4e1e-bf43-d5644a24b2ca"). InnerVolumeSpecName "kube-api-access-cfxkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.726706 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "11aebc28-b264-4e1e-bf43-d5644a24b2ca" (UID: "11aebc28-b264-4e1e-bf43-d5644a24b2ca"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.759004 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bgs" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.765886 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rv8wt" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.817975 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.818006 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfxkr\" (UniqueName: \"kubernetes.io/projected/11aebc28-b264-4e1e-bf43-d5644a24b2ca-kube-api-access-cfxkr\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.818018 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/11aebc28-b264-4e1e-bf43-d5644a24b2ca-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.820352 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.919383 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-catalog-content\") pod \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.919683 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-utilities\") pod \"0327ad91-6ead-42fe-9911-c0eaa52128f7\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.919718 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-utilities\") pod \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.919792 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-utilities\") pod \"52b0dffb-d746-416e-9494-6562cb444a5b\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920471 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-utilities" (OuterVolumeSpecName: "utilities") pod "0327ad91-6ead-42fe-9911-c0eaa52128f7" (UID: "0327ad91-6ead-42fe-9911-c0eaa52128f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920531 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-utilities" (OuterVolumeSpecName: "utilities") pod "fc6aa808-4684-4ba5-93bd-cb8ba9edca63" (UID: "fc6aa808-4684-4ba5-93bd-cb8ba9edca63"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920548 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-utilities" (OuterVolumeSpecName: "utilities") pod "52b0dffb-d746-416e-9494-6562cb444a5b" (UID: "52b0dffb-d746-416e-9494-6562cb444a5b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920584 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-catalog-content\") pod \"0327ad91-6ead-42fe-9911-c0eaa52128f7\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920622 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95466\" (UniqueName: \"kubernetes.io/projected/0327ad91-6ead-42fe-9911-c0eaa52128f7-kube-api-access-95466\") pod \"0327ad91-6ead-42fe-9911-c0eaa52128f7\" (UID: \"0327ad91-6ead-42fe-9911-c0eaa52128f7\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920645 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ttmk\" (UniqueName: \"kubernetes.io/projected/52b0dffb-d746-416e-9494-6562cb444a5b-kube-api-access-5ttmk\") pod \"52b0dffb-d746-416e-9494-6562cb444a5b\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-catalog-content\") pod \"52b0dffb-d746-416e-9494-6562cb444a5b\" (UID: \"52b0dffb-d746-416e-9494-6562cb444a5b\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920718 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dczv\" (UniqueName: \"kubernetes.io/projected/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-kube-api-access-4dczv\") pod \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\" (UID: \"fc6aa808-4684-4ba5-93bd-cb8ba9edca63\") " Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920905 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920922 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.920933 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.924266 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-kube-api-access-4dczv" (OuterVolumeSpecName: "kube-api-access-4dczv") pod "fc6aa808-4684-4ba5-93bd-cb8ba9edca63" (UID: "fc6aa808-4684-4ba5-93bd-cb8ba9edca63"). InnerVolumeSpecName "kube-api-access-4dczv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.925601 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0327ad91-6ead-42fe-9911-c0eaa52128f7-kube-api-access-95466" (OuterVolumeSpecName: "kube-api-access-95466") pod "0327ad91-6ead-42fe-9911-c0eaa52128f7" (UID: "0327ad91-6ead-42fe-9911-c0eaa52128f7"). InnerVolumeSpecName "kube-api-access-95466". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.926242 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52b0dffb-d746-416e-9494-6562cb444a5b-kube-api-access-5ttmk" (OuterVolumeSpecName: "kube-api-access-5ttmk") pod "52b0dffb-d746-416e-9494-6562cb444a5b" (UID: "52b0dffb-d746-416e-9494-6562cb444a5b"). InnerVolumeSpecName "kube-api-access-5ttmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.935226 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6kf2" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.941779 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc6aa808-4684-4ba5-93bd-cb8ba9edca63" (UID: "fc6aa808-4684-4ba5-93bd-cb8ba9edca63"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:29 crc kubenswrapper[4884]: I1128 15:25:29.987522 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52b0dffb-d746-416e-9494-6562cb444a5b" (UID: "52b0dffb-d746-416e-9494-6562cb444a5b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.021936 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-utilities\") pod \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022008 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-catalog-content\") pod \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022048 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k98vg\" (UniqueName: \"kubernetes.io/projected/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-kube-api-access-k98vg\") pod \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\" (UID: \"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b\") " Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022249 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dczv\" (UniqueName: \"kubernetes.io/projected/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-kube-api-access-4dczv\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022260 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc6aa808-4684-4ba5-93bd-cb8ba9edca63-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022269 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95466\" (UniqueName: \"kubernetes.io/projected/0327ad91-6ead-42fe-9911-c0eaa52128f7-kube-api-access-95466\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022278 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ttmk\" (UniqueName: \"kubernetes.io/projected/52b0dffb-d746-416e-9494-6562cb444a5b-kube-api-access-5ttmk\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022286 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52b0dffb-d746-416e-9494-6562cb444a5b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.022756 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-utilities" (OuterVolumeSpecName: "utilities") pod "ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" (UID: "ba35f9f9-8eda-4aee-9cd8-7140421e5a2b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.024301 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-kube-api-access-k98vg" (OuterVolumeSpecName: "kube-api-access-k98vg") pod "ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" (UID: "ba35f9f9-8eda-4aee-9cd8-7140421e5a2b"). InnerVolumeSpecName "kube-api-access-k98vg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.041820 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0327ad91-6ead-42fe-9911-c0eaa52128f7" (UID: "0327ad91-6ead-42fe-9911-c0eaa52128f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.091358 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" (UID: "ba35f9f9-8eda-4aee-9cd8-7140421e5a2b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.115601 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-54xz2"] Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.124313 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.124384 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.124419 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k98vg\" (UniqueName: \"kubernetes.io/projected/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b-kube-api-access-k98vg\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.124448 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0327ad91-6ead-42fe-9911-c0eaa52128f7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.476878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8wcfx" event={"ID":"52b0dffb-d746-416e-9494-6562cb444a5b","Type":"ContainerDied","Data":"689471d40e183179577ffbd52e57e03fdabb6483e0910180fb207a471804f396"} Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.477239 4884 scope.go:117] "RemoveContainer" containerID="1559e028d0dbe09f7198565a9e93906833e3b97337ee42b78f6f43e3f04099eb" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.476929 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8wcfx" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.480758 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-22bgs" event={"ID":"fc6aa808-4684-4ba5-93bd-cb8ba9edca63","Type":"ContainerDied","Data":"6e404c894926c58cc31441d0a44726441acc07622dcfad884a45455894515552"} Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.480806 4884 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.480806 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-22bgs"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.482593 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" event={"ID":"c624c933-d370-42a6-ae60-1bc50e004476","Type":"ContainerStarted","Data":"0880fec254703a80030e79108b05517829892222217d23a083b3484d9b7eba13"}
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.482641 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" event={"ID":"c624c933-d370-42a6-ae60-1bc50e004476","Type":"ContainerStarted","Data":"0d8d8b4c98b6beba882628fb392673fb9b2ade554b66dc59794fe83868840133"}
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.482775 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.484137 4884 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-54xz2 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused" start-of-body=
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.484171 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" podUID="c624c933-d370-42a6-ae60-1bc50e004476" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.63:8080/healthz\": dial tcp 10.217.0.63:8080: connect: connection refused"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.485324 4884 generic.go:334] "Generic (PLEG): container finished" podID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerID="bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027" exitCode=0
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.485353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6kf2" event={"ID":"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b","Type":"ContainerDied","Data":"bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027"}
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.485382 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b6kf2" event={"ID":"ba35f9f9-8eda-4aee-9cd8-7140421e5a2b","Type":"ContainerDied","Data":"8ad9b87145dd3030a1187d904d745a78aad0fc8aafb2f9797cd358ca4140b7fd"}
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.485404 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b6kf2"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.488690 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rv8wt" event={"ID":"0327ad91-6ead-42fe-9911-c0eaa52128f7","Type":"ContainerDied","Data":"0eb9586c1fa1e28bfd0319954d94dece0c030c10b711d3c35748dc1488ff459a"}
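The readiness probe failure logged just after ContainerStarted is the expected race: the kubelet probes GET http://10.217.0.63:8080/healthz before the marketplace-operator has bound its port, and the connection is refused until the next probe passes. A sketch of an equivalent probe in corev1 types, with the path and port taken from the log output; the delay and threshold values are illustrative assumptions, not values read from the operator's manifest:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// readinessForHealthz builds an HTTP readiness probe matching the
// GET /healthz:8080 check that fails once above before succeeding.
func readinessForHealthz() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/healthz",
				Port: intstr.FromInt(8080),
			},
		},
		InitialDelaySeconds: 1,  // assumed values; a brief initial
		PeriodSeconds:       10, // failure like the one above only
		FailureThreshold:    3,  // marks the pod unready, not failed
	}
}

A single refused connection never restarts the container; readiness only gates the pod's endpoints, which is why the log shows the pod turning "ready" one second later with no other consequence.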
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.488819 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rv8wt"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.494898 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk" event={"ID":"11aebc28-b264-4e1e-bf43-d5644a24b2ca","Type":"ContainerDied","Data":"4c1b6f547bf2ed7fb9555852b0f54c1be604e9f7fa418ba543c12ed3daabaa7e"}
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.494937 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9frzk"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.510421 4884 scope.go:117] "RemoveContainer" containerID="d7dd41d05acc94f3c5984482ada0bdeaede674c3436493f611fb7fb9badd2815"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.520529 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" podStartSLOduration=1.5205036920000001 podStartE2EDuration="1.520503692s" podCreationTimestamp="2025-11-28 15:25:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:25:30.514727549 +0000 UTC m=+370.077511360" watchObservedRunningTime="2025-11-28 15:25:30.520503692 +0000 UTC m=+370.083287493"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.548114 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8wcfx"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.550906 4884 scope.go:117] "RemoveContainer" containerID="4bf4ab95b5af12e7644956d87d612b67b66f61fc1669265f49898e3972d626fe"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.558309 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8wcfx"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.563977 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bgs"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.569233 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-22bgs"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.573774 4884 scope.go:117] "RemoveContainer" containerID="339d1f2c0b3f44e245ca91e8e46021d0aa27c6381f3f031f77603d36dfdc1378"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.573940 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rv8wt"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.578121 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rv8wt"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.587927 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9frzk"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.593790 4884 scope.go:117] "RemoveContainer" containerID="71c9a579495e856da43a8c29570c78b8ac2f43186cc4c52d9f5d6d76690e78d3"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.594668 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9frzk"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.599537 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b6kf2"]
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128
15:25:30.604582 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b6kf2"] Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.608161 4884 scope.go:117] "RemoveContainer" containerID="4416ec7450c2ad1ae55283471ea976574e5b041edf17e6467906b5017d466623" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.624461 4884 scope.go:117] "RemoveContainer" containerID="bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.641555 4884 scope.go:117] "RemoveContainer" containerID="3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.657173 4884 scope.go:117] "RemoveContainer" containerID="a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.671509 4884 scope.go:117] "RemoveContainer" containerID="bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027" Nov 28 15:25:30 crc kubenswrapper[4884]: E1128 15:25:30.671913 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027\": container with ID starting with bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027 not found: ID does not exist" containerID="bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.671949 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027"} err="failed to get container status \"bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027\": rpc error: code = NotFound desc = could not find container \"bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027\": container with ID starting with bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027 not found: ID does not exist" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.671972 4884 scope.go:117] "RemoveContainer" containerID="3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922" Nov 28 15:25:30 crc kubenswrapper[4884]: E1128 15:25:30.672281 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922\": container with ID starting with 3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922 not found: ID does not exist" containerID="3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.672310 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922"} err="failed to get container status \"3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922\": rpc error: code = NotFound desc = could not find container \"3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922\": container with ID starting with 3566041fbb02bd0151cda2066ebc7c015eabaf6262a612996eb5e5663951b922 not found: ID does not exist" Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.672332 4884 scope.go:117] "RemoveContainer" containerID="a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73" Nov 28 15:25:30 crc kubenswrapper[4884]: E1128 
15:25:30.672613 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73\": container with ID starting with a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73 not found: ID does not exist" containerID="a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.672672 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73"} err="failed to get container status \"a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73\": rpc error: code = NotFound desc = could not find container \"a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73\": container with ID starting with a7b0a75c4f4a34d300252c75b6f90cd2bf55ee2e292eac913c7b6ba9b39b2b73 not found: ID does not exist"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.672718 4884 scope.go:117] "RemoveContainer" containerID="91afbe1ed1204860d5bb8a94e87a81f9e945c747a44ed7833fe814fddc7e724f"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.686333 4884 scope.go:117] "RemoveContainer" containerID="057fda1dc83274fb0b7fcd966d5f822c474c0dc2a61172987dc4eb272919d2dd"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.693218 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" path="/var/lib/kubelet/pods/0327ad91-6ead-42fe-9911-c0eaa52128f7/volumes"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.693830 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" path="/var/lib/kubelet/pods/11aebc28-b264-4e1e-bf43-d5644a24b2ca/volumes"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.694328 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" path="/var/lib/kubelet/pods/52b0dffb-d746-416e-9494-6562cb444a5b/volumes"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.695440 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" path="/var/lib/kubelet/pods/ba35f9f9-8eda-4aee-9cd8-7140421e5a2b/volumes"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.696052 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" path="/var/lib/kubelet/pods/fc6aa808-4684-4ba5-93bd-cb8ba9edca63/volumes"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.703428 4884 scope.go:117] "RemoveContainer" containerID="6d76e8ed2496bfac58516c82c9a6c73da7bb3c2b78d279b1158edcc24df90bf8"
Nov 28 15:25:30 crc kubenswrapper[4884]: I1128 15:25:30.716415 4884 scope.go:117] "RemoveContainer" containerID="107432f3db8b9c5bca1c6b1d98ab5fd4b22f20bb612fc4b257367cfcecc5e2f0"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308065 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-twsk5"]
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308290 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308304 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="extract-content"
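The NotFound errors above are benign: RemoveContainer already deleted the container, and the kubelet's follow-up ContainerStatus query to CRI-O finds nothing. The same CRI call can be reproduced against the runtime socket; a sketch using the published CRI API, assuming the standard CRI-O socket path on this node (dial details and error handling are illustrative):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/status"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// Assumption: CRI-O listening on its default unix socket.
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	rt := runtimeapi.NewRuntimeServiceClient(conn)
	id := "bbc1987ecbad1f873c8b131a5d65d3377c9a2018f86025e63f3ec2d4f6238027" // ID from the log
	resp, err := rt.ContainerStatus(context.TODO(),
		&runtimeapi.ContainerStatusRequest{ContainerId: id})
	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
		// Same condition the kubelet logs as "could not find container".
		fmt.Println("container already gone:", id)
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("state:", resp.GetStatus().GetState())
}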
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308312 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308318 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308326 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308332 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308341 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308347 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308358 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308363 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308372 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308377 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308388 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308394 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308400 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308405 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308413 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308418 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308426 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator"
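These RemoveStaleState pairs fire when a new pod is admitted while the CPU and memory managers still hold accounting for containers of pods that were just deleted; the E-level severity is noisy but the cleanup itself is routine. The CPU manager checkpoints its view under /var/lib/kubelet/cpu_manager_state; a sketch that decodes that file for inspection, with the struct fields mirroring the kubelet's JSON checkpoint layout (an internal format that can change between releases, so treat the field names as assumptions):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// cpuManagerCheckpoint approximates the kubelet's checkpoint schema.
type cpuManagerCheckpoint struct {
	PolicyName    string                       `json:"policyName"`
	DefaultCPUSet string                       `json:"defaultCpuSet"`
	Entries       map[string]map[string]string `json:"entries,omitempty"` // podUID -> container -> cpuset
	Checksum      uint64                       `json:"checksum"`
}

func main() {
	raw, err := os.ReadFile("/var/lib/kubelet/cpu_manager_state")
	if err != nil {
		panic(err)
	}
	var cp cpuManagerCheckpoint
	if err := json.Unmarshal(raw, &cp); err != nil {
		panic(err)
	}
	fmt.Printf("policy=%s default=%q\n", cp.PolicyName, cp.DefaultCPUSet)
	for podUID, ctrs := range cp.Entries {
		for name, set := range ctrs {
			// With the default "none" policy Entries stays empty, which is
			// consistent with these deletions leaving no exclusive cpusets.
			fmt.Printf("  %s/%s -> %s\n", podUID, name, set)
		}
	}
}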
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308432 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308440 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308445 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308454 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308460 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="extract-content"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308468 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308474 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: E1128 15:25:31.308481 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308487 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="extract-utilities"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308573 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308583 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0327ad91-6ead-42fe-9911-c0eaa52128f7" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308596 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba35f9f9-8eda-4aee-9cd8-7140421e5a2b" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308605 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="52b0dffb-d746-416e-9494-6562cb444a5b" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308613 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="11aebc28-b264-4e1e-bf43-d5644a24b2ca" containerName="marketplace-operator"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.308621 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc6aa808-4684-4ba5-93bd-cb8ba9edca63" containerName="registry-server"
Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.310296 4884 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.317140 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.324619 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-twsk5"] Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.447335 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-utilities\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.447388 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-catalog-content\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.447443 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62scd\" (UniqueName: \"kubernetes.io/projected/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-kube-api-access-62scd\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.522127 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4vdxb"] Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.526360 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.530903 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.532984 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-54xz2" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.541126 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4vdxb"] Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.548432 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62scd\" (UniqueName: \"kubernetes.io/projected/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-kube-api-access-62scd\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.548493 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-utilities\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.548517 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-catalog-content\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.548902 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-catalog-content\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.549260 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-utilities\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.580288 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62scd\" (UniqueName: \"kubernetes.io/projected/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-kube-api-access-62scd\") pod \"redhat-marketplace-twsk5\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.640319 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.650423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpz6x\" (UniqueName: \"kubernetes.io/projected/61968083-3a85-48df-8adb-727b32a9720d-kube-api-access-dpz6x\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.650760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-catalog-content\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.650985 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-utilities\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.752447 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-catalog-content\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.752778 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-utilities\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.752814 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpz6x\" (UniqueName: \"kubernetes.io/projected/61968083-3a85-48df-8adb-727b32a9720d-kube-api-access-dpz6x\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.753334 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-catalog-content\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.753333 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-utilities\") pod \"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.773030 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpz6x\" (UniqueName: \"kubernetes.io/projected/61968083-3a85-48df-8adb-727b32a9720d-kube-api-access-dpz6x\") pod 
\"certified-operators-4vdxb\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") " pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:31 crc kubenswrapper[4884]: I1128 15:25:31.841309 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.040412 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-twsk5"] Nov 28 15:25:32 crc kubenswrapper[4884]: W1128 15:25:32.043901 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0a72ebb_65a3_4657_8af5_f42f71a4fa53.slice/crio-29f950309472723710a0d2da2a02151b1c9ec21863e537e5233df9c8d16d0e60 WatchSource:0}: Error finding container 29f950309472723710a0d2da2a02151b1c9ec21863e537e5233df9c8d16d0e60: Status 404 returned error can't find the container with id 29f950309472723710a0d2da2a02151b1c9ec21863e537e5233df9c8d16d0e60 Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.225962 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4vdxb"] Nov 28 15:25:32 crc kubenswrapper[4884]: W1128 15:25:32.248401 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61968083_3a85_48df_8adb_727b32a9720d.slice/crio-a5cbba13ca4f45aa15dfbafc3071e82649d431ed3e9ad6150b8f75a118d801fc WatchSource:0}: Error finding container a5cbba13ca4f45aa15dfbafc3071e82649d431ed3e9ad6150b8f75a118d801fc: Status 404 returned error can't find the container with id a5cbba13ca4f45aa15dfbafc3071e82649d431ed3e9ad6150b8f75a118d801fc Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.532254 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerID="de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2" exitCode=0 Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.532327 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-twsk5" event={"ID":"c0a72ebb-65a3-4657-8af5-f42f71a4fa53","Type":"ContainerDied","Data":"de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2"} Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.532356 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-twsk5" event={"ID":"c0a72ebb-65a3-4657-8af5-f42f71a4fa53","Type":"ContainerStarted","Data":"29f950309472723710a0d2da2a02151b1c9ec21863e537e5233df9c8d16d0e60"} Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.535356 4884 generic.go:334] "Generic (PLEG): container finished" podID="61968083-3a85-48df-8adb-727b32a9720d" containerID="daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e" exitCode=0 Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.535980 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerDied","Data":"daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e"} Nov 28 15:25:32 crc kubenswrapper[4884]: I1128 15:25:32.536046 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerStarted","Data":"a5cbba13ca4f45aa15dfbafc3071e82649d431ed3e9ad6150b8f75a118d801fc"} Nov 
28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.543972 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerStarted","Data":"0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf"}
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.546742 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerID="dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc" exitCode=0
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.546777 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-twsk5" event={"ID":"c0a72ebb-65a3-4657-8af5-f42f71a4fa53","Type":"ContainerDied","Data":"dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc"}
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.731968 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c8fbg"]
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.736415 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c8fbg"
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.739866 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.740751 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c8fbg"]
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.887311 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz7h7\" (UniqueName: \"kubernetes.io/projected/a17cada8-f3df-43e7-b073-61ae4ebec3a2-kube-api-access-fz7h7\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg"
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.887370 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a17cada8-f3df-43e7-b073-61ae4ebec3a2-catalog-content\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg"
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.887433 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a17cada8-f3df-43e7-b073-61ae4ebec3a2-utilities\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg"
Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.920855 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q82p5"]
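The "Caches populated for *v1.Secret" lines record a client-go reflector finishing its initial list for the dockercfg pull secrets the new catalog pods reference. The same cache machinery is exposed through informers; a sketch scoped to the namespace in these entries, with in-cluster credentials and the resync interval as assumptions:

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: runs inside the cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute, // illustrative resync period
		informers.WithNamespace("openshift-marketplace"))

	inf := factory.Core().V1().Secrets().Informer()
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("secret cached:", obj.(*corev1.Secret).Name)
		},
	})

	stop := make(chan struct{})
	factory.Start(stop)
	// Returning from here corresponds to the "Caches populated" point
	// that the reflector logs above.
	factory.WaitForCacheSync(stop)
	<-stop
}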
Need to start a new one" pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.926996 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q82p5"] Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.927520 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.988701 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz7h7\" (UniqueName: \"kubernetes.io/projected/a17cada8-f3df-43e7-b073-61ae4ebec3a2-kube-api-access-fz7h7\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.988744 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a17cada8-f3df-43e7-b073-61ae4ebec3a2-catalog-content\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.988769 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a17cada8-f3df-43e7-b073-61ae4ebec3a2-utilities\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.989197 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a17cada8-f3df-43e7-b073-61ae4ebec3a2-catalog-content\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:33 crc kubenswrapper[4884]: I1128 15:25:33.989449 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a17cada8-f3df-43e7-b073-61ae4ebec3a2-utilities\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.012050 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz7h7\" (UniqueName: \"kubernetes.io/projected/a17cada8-f3df-43e7-b073-61ae4ebec3a2-kube-api-access-fz7h7\") pod \"redhat-operators-c8fbg\" (UID: \"a17cada8-f3df-43e7-b073-61ae4ebec3a2\") " pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.072882 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.091592 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4prw8\" (UniqueName: \"kubernetes.io/projected/a0cd71d0-76ac-4f4e-8e35-f1624368814e-kube-api-access-4prw8\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.091748 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-catalog-content\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.091945 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-utilities\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.193376 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4prw8\" (UniqueName: \"kubernetes.io/projected/a0cd71d0-76ac-4f4e-8e35-f1624368814e-kube-api-access-4prw8\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.193651 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-catalog-content\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.193689 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-utilities\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.194199 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-utilities\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.194286 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-catalog-content\") pod \"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.215118 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4prw8\" (UniqueName: \"kubernetes.io/projected/a0cd71d0-76ac-4f4e-8e35-f1624368814e-kube-api-access-4prw8\") pod 
\"community-operators-q82p5\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.241403 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.468280 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c8fbg"] Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.565523 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-twsk5" event={"ID":"c0a72ebb-65a3-4657-8af5-f42f71a4fa53","Type":"ContainerStarted","Data":"e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358"} Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.566396 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c8fbg" event={"ID":"a17cada8-f3df-43e7-b073-61ae4ebec3a2","Type":"ContainerStarted","Data":"0b58cb261e3d4a7ef950b158bfbfbf7c7dac60d1f40132855dc70982f65e0274"} Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.571788 4884 generic.go:334] "Generic (PLEG): container finished" podID="61968083-3a85-48df-8adb-727b32a9720d" containerID="0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf" exitCode=0 Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.572168 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerDied","Data":"0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf"} Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.601925 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-twsk5" podStartSLOduration=2.019004014 podStartE2EDuration="3.601906842s" podCreationTimestamp="2025-11-28 15:25:31 +0000 UTC" firstStartedPulling="2025-11-28 15:25:32.533765999 +0000 UTC m=+372.096549800" lastFinishedPulling="2025-11-28 15:25:34.116668827 +0000 UTC m=+373.679452628" observedRunningTime="2025-11-28 15:25:34.585321662 +0000 UTC m=+374.148105463" watchObservedRunningTime="2025-11-28 15:25:34.601906842 +0000 UTC m=+374.164690643" Nov 28 15:25:34 crc kubenswrapper[4884]: I1128 15:25:34.647060 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q82p5"] Nov 28 15:25:34 crc kubenswrapper[4884]: W1128 15:25:34.715669 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0cd71d0_76ac_4f4e_8e35_f1624368814e.slice/crio-12581e966bb90f914c4bef9fc4b1a8014bde2c79aecf085c99e019eaa389287d WatchSource:0}: Error finding container 12581e966bb90f914c4bef9fc4b1a8014bde2c79aecf085c99e019eaa389287d: Status 404 returned error can't find the container with id 12581e966bb90f914c4bef9fc4b1a8014bde2c79aecf085c99e019eaa389287d Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.579375 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerStarted","Data":"403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62"} Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.580961 4884 generic.go:334] "Generic (PLEG): container finished" podID="a17cada8-f3df-43e7-b073-61ae4ebec3a2" 
containerID="1527f936a3fbd0ef6bdbe3a5caf1733e60654bc63d121aac93992ff42eeb7e91" exitCode=0 Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.581008 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c8fbg" event={"ID":"a17cada8-f3df-43e7-b073-61ae4ebec3a2","Type":"ContainerDied","Data":"1527f936a3fbd0ef6bdbe3a5caf1733e60654bc63d121aac93992ff42eeb7e91"} Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.582858 4884 generic.go:334] "Generic (PLEG): container finished" podID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerID="5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce" exitCode=0 Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.583335 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerDied","Data":"5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce"} Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.583364 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerStarted","Data":"12581e966bb90f914c4bef9fc4b1a8014bde2c79aecf085c99e019eaa389287d"} Nov 28 15:25:35 crc kubenswrapper[4884]: I1128 15:25:35.603462 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4vdxb" podStartSLOduration=1.85009332 podStartE2EDuration="4.603448385s" podCreationTimestamp="2025-11-28 15:25:31 +0000 UTC" firstStartedPulling="2025-11-28 15:25:32.537038955 +0000 UTC m=+372.099822756" lastFinishedPulling="2025-11-28 15:25:35.290394 +0000 UTC m=+374.853177821" observedRunningTime="2025-11-28 15:25:35.596798669 +0000 UTC m=+375.159582470" watchObservedRunningTime="2025-11-28 15:25:35.603448385 +0000 UTC m=+375.166232186" Nov 28 15:25:36 crc kubenswrapper[4884]: I1128 15:25:36.590600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c8fbg" event={"ID":"a17cada8-f3df-43e7-b073-61ae4ebec3a2","Type":"ContainerStarted","Data":"81fdc72281529a5e4c7ee29c86c80435a80e27fe9dc0a2a40b7830d1c53a6a5d"} Nov 28 15:25:36 crc kubenswrapper[4884]: I1128 15:25:36.593991 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerStarted","Data":"adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310"} Nov 28 15:25:37 crc kubenswrapper[4884]: I1128 15:25:37.601116 4884 generic.go:334] "Generic (PLEG): container finished" podID="a17cada8-f3df-43e7-b073-61ae4ebec3a2" containerID="81fdc72281529a5e4c7ee29c86c80435a80e27fe9dc0a2a40b7830d1c53a6a5d" exitCode=0 Nov 28 15:25:37 crc kubenswrapper[4884]: I1128 15:25:37.601136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c8fbg" event={"ID":"a17cada8-f3df-43e7-b073-61ae4ebec3a2","Type":"ContainerDied","Data":"81fdc72281529a5e4c7ee29c86c80435a80e27fe9dc0a2a40b7830d1c53a6a5d"} Nov 28 15:25:37 crc kubenswrapper[4884]: I1128 15:25:37.604391 4884 generic.go:334] "Generic (PLEG): container finished" podID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerID="adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310" exitCode=0 Nov 28 15:25:37 crc kubenswrapper[4884]: I1128 15:25:37.604426 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerDied","Data":"adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310"} Nov 28 15:25:39 crc kubenswrapper[4884]: I1128 15:25:39.621492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c8fbg" event={"ID":"a17cada8-f3df-43e7-b073-61ae4ebec3a2","Type":"ContainerStarted","Data":"92e364e1517f334dcd8d33aea783504335ba71b65a85cb0d7af787e57273a9d8"} Nov 28 15:25:39 crc kubenswrapper[4884]: I1128 15:25:39.625179 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerStarted","Data":"aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb"} Nov 28 15:25:39 crc kubenswrapper[4884]: I1128 15:25:39.641110 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c8fbg" podStartSLOduration=4.091379613 podStartE2EDuration="6.641081334s" podCreationTimestamp="2025-11-28 15:25:33 +0000 UTC" firstStartedPulling="2025-11-28 15:25:35.582470798 +0000 UTC m=+375.145254599" lastFinishedPulling="2025-11-28 15:25:38.132172519 +0000 UTC m=+377.694956320" observedRunningTime="2025-11-28 15:25:39.639104892 +0000 UTC m=+379.201888763" watchObservedRunningTime="2025-11-28 15:25:39.641081334 +0000 UTC m=+379.203865135" Nov 28 15:25:39 crc kubenswrapper[4884]: I1128 15:25:39.666542 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q82p5" podStartSLOduration=4.059504038 podStartE2EDuration="6.666524449s" podCreationTimestamp="2025-11-28 15:25:33 +0000 UTC" firstStartedPulling="2025-11-28 15:25:35.585287863 +0000 UTC m=+375.148071664" lastFinishedPulling="2025-11-28 15:25:38.192308274 +0000 UTC m=+377.755092075" observedRunningTime="2025-11-28 15:25:39.665489981 +0000 UTC m=+379.228273802" watchObservedRunningTime="2025-11-28 15:25:39.666524449 +0000 UTC m=+379.229308250" Nov 28 15:25:41 crc kubenswrapper[4884]: I1128 15:25:41.641339 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:41 crc kubenswrapper[4884]: I1128 15:25:41.642446 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:41 crc kubenswrapper[4884]: I1128 15:25:41.679861 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:41 crc kubenswrapper[4884]: I1128 15:25:41.841526 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:41 crc kubenswrapper[4884]: I1128 15:25:41.842978 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:41 crc kubenswrapper[4884]: I1128 15:25:41.896394 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:42 crc kubenswrapper[4884]: I1128 15:25:42.700611 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4vdxb" Nov 28 15:25:42 crc kubenswrapper[4884]: I1128 15:25:42.708509 4884 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 15:25:44 crc kubenswrapper[4884]: I1128 15:25:44.073819 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:44 crc kubenswrapper[4884]: I1128 15:25:44.074384 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:44 crc kubenswrapper[4884]: I1128 15:25:44.242003 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:44 crc kubenswrapper[4884]: I1128 15:25:44.242258 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:44 crc kubenswrapper[4884]: I1128 15:25:44.290563 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:44 crc kubenswrapper[4884]: I1128 15:25:44.711475 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q82p5" Nov 28 15:25:45 crc kubenswrapper[4884]: I1128 15:25:45.118735 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c8fbg" podUID="a17cada8-f3df-43e7-b073-61ae4ebec3a2" containerName="registry-server" probeResult="failure" output=< Nov 28 15:25:45 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:25:45 crc kubenswrapper[4884]: > Nov 28 15:25:51 crc kubenswrapper[4884]: I1128 15:25:51.243160 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:25:51 crc kubenswrapper[4884]: I1128 15:25:51.243463 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:25:54 crc kubenswrapper[4884]: I1128 15:25:54.131604 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:54 crc kubenswrapper[4884]: I1128 15:25:54.183557 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c8fbg" Nov 28 15:25:54 crc kubenswrapper[4884]: I1128 15:25:54.472474 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" podUID="fc42ab1b-612c-4c42-a6ab-ff39c1908565" containerName="registry" containerID="cri-o://724a33aaa6ef4d14ccee24172dff6fa9a91c1834cc2d0398f86f79915bfabf36" gracePeriod=30 Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.727057 4884 generic.go:334] "Generic (PLEG): container finished" podID="fc42ab1b-612c-4c42-a6ab-ff39c1908565" containerID="724a33aaa6ef4d14ccee24172dff6fa9a91c1834cc2d0398f86f79915bfabf36" exitCode=0 Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.727144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" event={"ID":"fc42ab1b-612c-4c42-a6ab-ff39c1908565","Type":"ContainerDied","Data":"724a33aaa6ef4d14ccee24172dff6fa9a91c1834cc2d0398f86f79915bfabf36"} Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.727551 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" event={"ID":"fc42ab1b-612c-4c42-a6ab-ff39c1908565","Type":"ContainerDied","Data":"a555bc1a449c7d3df8350b60250f59531498a8d42153a6d3fef0f8998363d193"} Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.727570 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a555bc1a449c7d3df8350b60250f59531498a8d42153a6d3fef0f8998363d193" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.750306 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.907355 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-certificates\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.907525 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-tls\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.907692 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.907727 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc42ab1b-612c-4c42-a6ab-ff39c1908565-ca-trust-extracted\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.907765 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-bound-sa-token\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.908622 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc42ab1b-612c-4c42-a6ab-ff39c1908565-installation-pull-secrets\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.908800 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bnkt\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-kube-api-access-9bnkt\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc 
kubenswrapper[4884]: I1128 15:25:56.908888 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-trusted-ca\") pod \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\" (UID: \"fc42ab1b-612c-4c42-a6ab-ff39c1908565\") " Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.910386 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.910598 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.916350 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc42ab1b-612c-4c42-a6ab-ff39c1908565-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.917042 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.917843 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-kube-api-access-9bnkt" (OuterVolumeSpecName: "kube-api-access-9bnkt") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "kube-api-access-9bnkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.920197 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.922859 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 15:25:56 crc kubenswrapper[4884]: I1128 15:25:56.930779 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc42ab1b-612c-4c42-a6ab-ff39c1908565-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "fc42ab1b-612c-4c42-a6ab-ff39c1908565" (UID: "fc42ab1b-612c-4c42-a6ab-ff39c1908565"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010778 4884 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc42ab1b-612c-4c42-a6ab-ff39c1908565-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010830 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010851 4884 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc42ab1b-612c-4c42-a6ab-ff39c1908565-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010872 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bnkt\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-kube-api-access-9bnkt\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010892 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010909 4884 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.010925 4884 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc42ab1b-612c-4c42-a6ab-ff39c1908565-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.734993 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-ls7t4" Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.782333 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ls7t4"] Nov 28 15:25:57 crc kubenswrapper[4884]: I1128 15:25:57.789317 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-ls7t4"] Nov 28 15:25:58 crc kubenswrapper[4884]: I1128 15:25:58.694870 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc42ab1b-612c-4c42-a6ab-ff39c1908565" path="/var/lib/kubelet/pods/fc42ab1b-612c-4c42-a6ab-ff39c1908565/volumes" Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.243200 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.243803 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.243919 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.244596 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"76651d38a7f1e7373020d4d61462a753c9abd9d4ff1825e3f902dc828166ea97"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.244665 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://76651d38a7f1e7373020d4d61462a753c9abd9d4ff1825e3f902dc828166ea97" gracePeriod=600 Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.912702 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="76651d38a7f1e7373020d4d61462a753c9abd9d4ff1825e3f902dc828166ea97" exitCode=0 Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.912776 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"76651d38a7f1e7373020d4d61462a753c9abd9d4ff1825e3f902dc828166ea97"} Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.913028 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"04645a88dabc02232d7240a706d644e9a8ac41eadf2876385d64b783097a7d98"} Nov 28 15:26:21 crc kubenswrapper[4884]: I1128 15:26:21.913080 4884 scope.go:117] "RemoveContainer" 
containerID="049e6fb74340a0b32886073545cdf3ddd26ac787dfa3be348243499f3e35f29a" Nov 28 15:28:20 crc kubenswrapper[4884]: I1128 15:28:20.967660 4884 scope.go:117] "RemoveContainer" containerID="724a33aaa6ef4d14ccee24172dff6fa9a91c1834cc2d0398f86f79915bfabf36" Nov 28 15:28:21 crc kubenswrapper[4884]: I1128 15:28:21.243389 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:28:21 crc kubenswrapper[4884]: I1128 15:28:21.243876 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:28:51 crc kubenswrapper[4884]: I1128 15:28:51.242940 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:28:51 crc kubenswrapper[4884]: I1128 15:28:51.243823 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:29:21 crc kubenswrapper[4884]: I1128 15:29:21.242645 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:29:21 crc kubenswrapper[4884]: I1128 15:29:21.244005 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:29:21 crc kubenswrapper[4884]: I1128 15:29:21.244161 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:29:21 crc kubenswrapper[4884]: I1128 15:29:21.244680 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"04645a88dabc02232d7240a706d644e9a8ac41eadf2876385d64b783097a7d98"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:29:21 crc kubenswrapper[4884]: I1128 15:29:21.244823 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://04645a88dabc02232d7240a706d644e9a8ac41eadf2876385d64b783097a7d98" gracePeriod=600 Nov 28 15:29:23 crc 
kubenswrapper[4884]: I1128 15:29:23.092201 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="04645a88dabc02232d7240a706d644e9a8ac41eadf2876385d64b783097a7d98" exitCode=0 Nov 28 15:29:23 crc kubenswrapper[4884]: I1128 15:29:23.092287 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"04645a88dabc02232d7240a706d644e9a8ac41eadf2876385d64b783097a7d98"} Nov 28 15:29:23 crc kubenswrapper[4884]: I1128 15:29:23.092833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"50e385994cb5d4fdd4e12c220735d7ad7864ce7ac7e374e05fb6cf32e28d143e"} Nov 28 15:29:23 crc kubenswrapper[4884]: I1128 15:29:23.092861 4884 scope.go:117] "RemoveContainer" containerID="76651d38a7f1e7373020d4d61462a753c9abd9d4ff1825e3f902dc828166ea97" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.175356 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn"] Nov 28 15:30:00 crc kubenswrapper[4884]: E1128 15:30:00.177107 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc42ab1b-612c-4c42-a6ab-ff39c1908565" containerName="registry" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.177174 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc42ab1b-612c-4c42-a6ab-ff39c1908565" containerName="registry" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.178190 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc42ab1b-612c-4c42-a6ab-ff39c1908565" containerName="registry" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.180024 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.185582 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.185972 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn"] Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.186455 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.342252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/256c7539-5f02-4f8f-9059-63fff2c34910-secret-volume\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.342356 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btdms\" (UniqueName: \"kubernetes.io/projected/256c7539-5f02-4f8f-9059-63fff2c34910-kube-api-access-btdms\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.342400 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/256c7539-5f02-4f8f-9059-63fff2c34910-config-volume\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.443599 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/256c7539-5f02-4f8f-9059-63fff2c34910-config-volume\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.443723 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/256c7539-5f02-4f8f-9059-63fff2c34910-secret-volume\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.443800 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btdms\" (UniqueName: \"kubernetes.io/projected/256c7539-5f02-4f8f-9059-63fff2c34910-kube-api-access-btdms\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.444681 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/256c7539-5f02-4f8f-9059-63fff2c34910-config-volume\") pod 
\"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.450731 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/256c7539-5f02-4f8f-9059-63fff2c34910-secret-volume\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.467612 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btdms\" (UniqueName: \"kubernetes.io/projected/256c7539-5f02-4f8f-9059-63fff2c34910-kube-api-access-btdms\") pod \"collect-profiles-29405730-rpdfn\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.508792 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:00 crc kubenswrapper[4884]: I1128 15:30:00.925192 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn"] Nov 28 15:30:01 crc kubenswrapper[4884]: I1128 15:30:01.327753 4884 generic.go:334] "Generic (PLEG): container finished" podID="256c7539-5f02-4f8f-9059-63fff2c34910" containerID="8da1323b20d1a6ca4604d53fca58074534c6a8fe439d7f9ba16464983a1c9b38" exitCode=0 Nov 28 15:30:01 crc kubenswrapper[4884]: I1128 15:30:01.327799 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" event={"ID":"256c7539-5f02-4f8f-9059-63fff2c34910","Type":"ContainerDied","Data":"8da1323b20d1a6ca4604d53fca58074534c6a8fe439d7f9ba16464983a1c9b38"} Nov 28 15:30:01 crc kubenswrapper[4884]: I1128 15:30:01.327829 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" event={"ID":"256c7539-5f02-4f8f-9059-63fff2c34910","Type":"ContainerStarted","Data":"be57dd18a5846f8c65d646250cf7c60426e7331610ca1e4add1769341f713f8d"} Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.551642 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.671814 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btdms\" (UniqueName: \"kubernetes.io/projected/256c7539-5f02-4f8f-9059-63fff2c34910-kube-api-access-btdms\") pod \"256c7539-5f02-4f8f-9059-63fff2c34910\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.671889 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/256c7539-5f02-4f8f-9059-63fff2c34910-secret-volume\") pod \"256c7539-5f02-4f8f-9059-63fff2c34910\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.671926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/256c7539-5f02-4f8f-9059-63fff2c34910-config-volume\") pod \"256c7539-5f02-4f8f-9059-63fff2c34910\" (UID: \"256c7539-5f02-4f8f-9059-63fff2c34910\") " Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.672991 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/256c7539-5f02-4f8f-9059-63fff2c34910-config-volume" (OuterVolumeSpecName: "config-volume") pod "256c7539-5f02-4f8f-9059-63fff2c34910" (UID: "256c7539-5f02-4f8f-9059-63fff2c34910"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.679267 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/256c7539-5f02-4f8f-9059-63fff2c34910-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "256c7539-5f02-4f8f-9059-63fff2c34910" (UID: "256c7539-5f02-4f8f-9059-63fff2c34910"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.679585 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/256c7539-5f02-4f8f-9059-63fff2c34910-kube-api-access-btdms" (OuterVolumeSpecName: "kube-api-access-btdms") pod "256c7539-5f02-4f8f-9059-63fff2c34910" (UID: "256c7539-5f02-4f8f-9059-63fff2c34910"). InnerVolumeSpecName "kube-api-access-btdms". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.773062 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btdms\" (UniqueName: \"kubernetes.io/projected/256c7539-5f02-4f8f-9059-63fff2c34910-kube-api-access-btdms\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.773121 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/256c7539-5f02-4f8f-9059-63fff2c34910-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:02 crc kubenswrapper[4884]: I1128 15:30:02.773133 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/256c7539-5f02-4f8f-9059-63fff2c34910-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:03 crc kubenswrapper[4884]: I1128 15:30:03.338831 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" event={"ID":"256c7539-5f02-4f8f-9059-63fff2c34910","Type":"ContainerDied","Data":"be57dd18a5846f8c65d646250cf7c60426e7331610ca1e4add1769341f713f8d"} Nov 28 15:30:03 crc kubenswrapper[4884]: I1128 15:30:03.338908 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be57dd18a5846f8c65d646250cf7c60426e7331610ca1e4add1769341f713f8d" Nov 28 15:30:03 crc kubenswrapper[4884]: I1128 15:30:03.339150 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn" Nov 28 15:31:48 crc kubenswrapper[4884]: I1128 15:31:48.573760 4884 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 15:31:51 crc kubenswrapper[4884]: I1128 15:31:51.243606 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:31:51 crc kubenswrapper[4884]: I1128 15:31:51.243983 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:32:21 crc kubenswrapper[4884]: I1128 15:32:21.243204 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:32:21 crc kubenswrapper[4884]: I1128 15:32:21.243817 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:32:51 crc kubenswrapper[4884]: I1128 15:32:51.243011 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness 
probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:32:51 crc kubenswrapper[4884]: I1128 15:32:51.244017 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:32:51 crc kubenswrapper[4884]: I1128 15:32:51.244139 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 15:32:51 crc kubenswrapper[4884]: I1128 15:32:51.245158 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"50e385994cb5d4fdd4e12c220735d7ad7864ce7ac7e374e05fb6cf32e28d143e"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 15:32:51 crc kubenswrapper[4884]: I1128 15:32:51.245284 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://50e385994cb5d4fdd4e12c220735d7ad7864ce7ac7e374e05fb6cf32e28d143e" gracePeriod=600
Nov 28 15:32:52 crc kubenswrapper[4884]: I1128 15:32:52.348251 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="50e385994cb5d4fdd4e12c220735d7ad7864ce7ac7e374e05fb6cf32e28d143e" exitCode=0
Nov 28 15:32:52 crc kubenswrapper[4884]: I1128 15:32:52.348332 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"50e385994cb5d4fdd4e12c220735d7ad7864ce7ac7e374e05fb6cf32e28d143e"}
Nov 28 15:32:52 crc kubenswrapper[4884]: I1128 15:32:52.348800 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"df509a7285ba0f4de67851edf0f5010eb933a0baadb76ebfdc9cd205fbf9037b"}
Nov 28 15:32:52 crc kubenswrapper[4884]: I1128 15:32:52.348849 4884 scope.go:117] "RemoveContainer" containerID="04645a88dabc02232d7240a706d644e9a8ac41eadf2876385d64b783097a7d98"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.251035 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6wh6q"]
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.252551 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-controller" containerID="cri-o://349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.253023 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="sbdb" containerID="cri-o://d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.253116 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="nbdb" containerID="cri-o://8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.253180 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="northd" containerID="cri-o://ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.253240 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.253296 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-node" containerID="cri-o://7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.253348 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-acl-logging" containerID="cri-o://e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.305389 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller" containerID="cri-o://f3401938bf7dbfdd67d846882d2e44239a96c28d4a82bbf37d9d50fd4dc57f2f" gracePeriod=30
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.382216 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-conmon-349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-conmon-e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-conmon-5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-conmon-7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b77432_5316_4dd6_a4a9_f74651377bdd.slice/crio-7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod237d188f_b799_4a82_bc67_c3a8fac5771f.slice/crio-conmon-80eb534a7e5eab5045722acb46a009f657d4036e304cfda4b74740290f476161.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.524473 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovnkube-controller/3.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.527057 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovn-acl-logging/0.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.527563 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovn-controller/0.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.527977 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="f3401938bf7dbfdd67d846882d2e44239a96c28d4a82bbf37d9d50fd4dc57f2f" exitCode=0
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528001 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633" exitCode=0
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528009 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34" exitCode=0
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528015 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5" exitCode=0
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528022 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c" exitCode=0
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528028 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8" exitCode=0
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528034 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf" exitCode=143
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528041 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerID="349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4" exitCode=143
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528074 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"f3401938bf7dbfdd67d846882d2e44239a96c28d4a82bbf37d9d50fd4dc57f2f"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528113 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528124 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528133 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528158 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528169 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528180 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q" event={"ID":"d1b77432-5316-4dd6-a4a9-f74651377bdd","Type":"ContainerDied","Data":"e53d52067fa7c483c8c2177d0d4789bd1b081f25258cdccaaf210fcc93698a8f"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528204 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e53d52067fa7c483c8c2177d0d4789bd1b081f25258cdccaaf210fcc93698a8f"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.528222 4884 scope.go:117] "RemoveContainer" containerID="29a590c9321ded3c261466dac4ed5abd32756a8df2f3e725f3f867be5120a005"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.530273 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/2.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.531331 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/1.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.531355 4884 generic.go:334] "Generic (PLEG): container finished" podID="237d188f-b799-4a82-bc67-c3a8fac5771f" containerID="80eb534a7e5eab5045722acb46a009f657d4036e304cfda4b74740290f476161" exitCode=2
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.531371 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerDied","Data":"80eb534a7e5eab5045722acb46a009f657d4036e304cfda4b74740290f476161"}
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.531889 4884 scope.go:117] "RemoveContainer" containerID="80eb534a7e5eab5045722acb46a009f657d4036e304cfda4b74740290f476161"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.562485 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovn-acl-logging/0.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.562959 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovn-controller/0.log"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.563341 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.577418 4884 scope.go:117] "RemoveContainer" containerID="b12a54003a6be6276d95440ae0eb6ba7c463a8e9b4b34c19217b2b21d3d71f58"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.630720 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7grng"]
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.630990 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631013 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631027 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631036 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631050 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631059 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631069 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="northd"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631077 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="northd"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631107 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="sbdb"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631119 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="sbdb"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631133 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631143 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631154 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631165 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631177 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-node"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631184 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-node"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631198 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="256c7539-5f02-4f8f-9059-63fff2c34910" containerName="collect-profiles"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631206 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="256c7539-5f02-4f8f-9059-63fff2c34910" containerName="collect-profiles"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631217 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kubecfg-setup"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631225 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kubecfg-setup"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631235 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="nbdb"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631242 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="nbdb"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631250 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631257 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631267 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-acl-logging"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631275 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-acl-logging"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631381 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="256c7539-5f02-4f8f-9059-63fff2c34910" containerName="collect-profiles"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631393 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631402 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631411 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="northd"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631423 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="sbdb"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631433 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-acl-logging"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631442 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovn-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631450 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631460 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631470 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="kube-rbac-proxy-node"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631479 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="nbdb"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631490 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: E1128 15:33:21.631599 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631609 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.631720 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" containerName="ovnkube-controller"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.633647 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697132 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-kubelet\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697204 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-slash\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697231 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovn-node-metrics-cert\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-log-socket\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697290 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-netns\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697310 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-ovn-kubernetes\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-systemd-units\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697240 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-log-socket" (OuterVolumeSpecName: "log-socket") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697366 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-env-overrides\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697309 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-slash" (OuterVolumeSpecName: "host-slash") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697352 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697460 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-config\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697461 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697499 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-systemd\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697503 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697524 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-bin\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697555 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-script-lib\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697594 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-openvswitch\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697622 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-node-log\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697644 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-ovn\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-etc-openvswitch\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697716 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-netd\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697747 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697752 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697772 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-var-lib-openvswitch\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697799 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frpmx\" (UniqueName: \"kubernetes.io/projected/d1b77432-5316-4dd6-a4a9-f74651377bdd-kube-api-access-frpmx\") pod \"d1b77432-5316-4dd6-a4a9-f74651377bdd\" (UID: \"d1b77432-5316-4dd6-a4a9-f74651377bdd\") "
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697889 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-node-log" (OuterVolumeSpecName: "node-log") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.697983 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovn-node-metrics-cert\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698036 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-kubelet\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698040 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-ovn\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698086 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-log-socket\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698120 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698122 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698146 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698179 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-env-overrides\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698168 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698204 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698251 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42vg2\" (UniqueName: \"kubernetes.io/projected/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-kube-api-access-42vg2\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698272 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698247 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698306 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-run-ovn-kubernetes\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698332 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovnkube-script-lib\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698389 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-node-log\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698409 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-cni-bin\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698436 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-systemd\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-etc-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698500 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-var-lib-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698514 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698569 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698521 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovnkube-config\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698706 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-systemd-units\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698772 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-cni-netd\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698831 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-run-netns\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.698876 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-slash\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699080 4884 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699218 4884 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699241 4884 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699261 4884 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699279 4884 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-slash\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699296 4884 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-log-socket\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699312 4884 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699328 4884 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699342 4884 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699357 4884 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699373 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699387 4884 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699400 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699415 4884 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699431 4884 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-node-log\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699447 4884 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.699461 4884 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.703391 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.705516 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1b77432-5316-4dd6-a4a9-f74651377bdd-kube-api-access-frpmx" (OuterVolumeSpecName: "kube-api-access-frpmx") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "kube-api-access-frpmx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.718026 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "d1b77432-5316-4dd6-a4a9-f74651377bdd" (UID: "d1b77432-5316-4dd6-a4a9-f74651377bdd"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800618 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800673 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42vg2\" (UniqueName: \"kubernetes.io/projected/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-kube-api-access-42vg2\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800694 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-run-ovn-kubernetes\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovnkube-script-lib\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800743 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-node-log\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800756 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-cni-bin\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800777 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-systemd\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800775 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800814 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-node-log\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800836 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-systemd\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800840 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-etc-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800844 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-cni-bin\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800787 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-run-ovn-kubernetes\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800793 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-etc-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800924 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-var-lib-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovnkube-config\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800966 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-systemd-units\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.800980 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-cni-netd\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801001 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-run-netns\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801013 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-slash\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801031 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-run-netns\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801055 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-cni-netd\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801065 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovn-node-metrics-cert\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801077 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-slash\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801014 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-systemd-units\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801145 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-var-lib-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801151 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-kubelet\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801181 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-ovn\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801200 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-log-socket\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801245 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-host-kubelet\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-env-overrides\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801268 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-ovn\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801271 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801289 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-log-socket\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801347 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-run-openvswitch\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801367 4884 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/d1b77432-5316-4dd6-a4a9-f74651377bdd-run-systemd\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801451 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frpmx\" (UniqueName: \"kubernetes.io/projected/d1b77432-5316-4dd6-a4a9-f74651377bdd-kube-api-access-frpmx\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.801465 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/d1b77432-5316-4dd6-a4a9-f74651377bdd-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.802140 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovnkube-script-lib\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.802145 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-env-overrides\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.802290 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovnkube-config\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.805290 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-ovn-node-metrics-cert\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.816764 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42vg2\" (UniqueName: \"kubernetes.io/projected/987e10e9-a9ff-4280-b62e-0a5b3ae091e3-kube-api-access-42vg2\") pod \"ovnkube-node-7grng\" (UID: \"987e10e9-a9ff-4280-b62e-0a5b3ae091e3\") " pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:21 crc kubenswrapper[4884]: I1128 15:33:21.971757 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
Nov 28 15:33:22 crc kubenswrapper[4884]: W1128 15:33:22.002326 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod987e10e9_a9ff_4280_b62e_0a5b3ae091e3.slice/crio-12122e98842f1c0aa7bde2ec06c13a47be9476ff74d47d1c5d0ce5dfeccefb5d WatchSource:0}: Error finding container 12122e98842f1c0aa7bde2ec06c13a47be9476ff74d47d1c5d0ce5dfeccefb5d: Status 404 returned error can't find the container with id 12122e98842f1c0aa7bde2ec06c13a47be9476ff74d47d1c5d0ce5dfeccefb5d
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.546917 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-zj27d_237d188f-b799-4a82-bc67-c3a8fac5771f/kube-multus/2.log"
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.548490 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-zj27d" event={"ID":"237d188f-b799-4a82-bc67-c3a8fac5771f","Type":"ContainerStarted","Data":"629b68d45465f2a0bd914cd35ab1de5c2720eb9cb1004973a2ba2d83894b914e"}
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.556616 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovn-acl-logging/0.log"
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.557893 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-6wh6q_d1b77432-5316-4dd6-a4a9-f74651377bdd/ovn-controller/0.log"
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.558951 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-6wh6q"
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.579182 4884 generic.go:334] "Generic (PLEG): container finished" podID="987e10e9-a9ff-4280-b62e-0a5b3ae091e3" containerID="cb3fc42211e22ae070b69539ccf437a59b2e77095955492825abf14b8676d18e" exitCode=0
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.579273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerDied","Data":"cb3fc42211e22ae070b69539ccf437a59b2e77095955492825abf14b8676d18e"}
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.579352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"12122e98842f1c0aa7bde2ec06c13a47be9476ff74d47d1c5d0ce5dfeccefb5d"}
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.836345 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6wh6q"]
Nov 28 15:33:22 crc kubenswrapper[4884]: I1128 15:33:22.842519 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-6wh6q"]
Nov 28 15:33:23 crc kubenswrapper[4884]: I1128 15:33:23.587285 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"4b7262108ce45c6669627029180cec2a5991b39c89446548d08019fa38d31328"}
Nov 28 15:33:23 crc kubenswrapper[4884]: I1128 15:33:23.587561 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng"
event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"ad0091999723b9864c63b5ecd35105a9a9aa9a0921c6105cd205b3141ae70c32"} Nov 28 15:33:23 crc kubenswrapper[4884]: I1128 15:33:23.587574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"f5f585f64a76d67e8732b837c7ff3a36dd699b864c5a8153a5a606efdbe238ee"} Nov 28 15:33:23 crc kubenswrapper[4884]: I1128 15:33:23.587584 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"128ca7fb19e015b700e33e4480b731551dc0c7f1e9bd0c88e93a398bc4dc408e"} Nov 28 15:33:23 crc kubenswrapper[4884]: I1128 15:33:23.587592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"546c438a23e55b27105413d5ee6580c152de7434adfff1ea991048e0e515c985"} Nov 28 15:33:24 crc kubenswrapper[4884]: I1128 15:33:24.609991 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"088cd97c006d2c07badcddea0ae75cb43c68e8b8ff0cc49c03dca8e764a88add"} Nov 28 15:33:24 crc kubenswrapper[4884]: I1128 15:33:24.699549 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1b77432-5316-4dd6-a4a9-f74651377bdd" path="/var/lib/kubelet/pods/d1b77432-5316-4dd6-a4a9-f74651377bdd/volumes" Nov 28 15:33:26 crc kubenswrapper[4884]: I1128 15:33:26.630705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"30e6a041d818c30f2cf72ccfa8cb32293192a9bc9db304022d3968cb77c426a8"} Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.247530 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-nggv8"] Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.248544 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.251365 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.251386 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.251892 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.252188 4884 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-ptzd4" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.376715 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28dh7\" (UniqueName: \"kubernetes.io/projected/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-kube-api-access-28dh7\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.377002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-crc-storage\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.377174 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-node-mnt\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.478703 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-crc-storage\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.478761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-node-mnt\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.478822 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28dh7\" (UniqueName: \"kubernetes.io/projected/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-kube-api-access-28dh7\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.479041 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-node-mnt\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.479762 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-crc-storage\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.503686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28dh7\" (UniqueName: \"kubernetes.io/projected/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-kube-api-access-28dh7\") pod \"crc-storage-crc-nggv8\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: I1128 15:33:27.576296 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: E1128 15:33:27.601577 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(ee3dfa00d99d09099afabae96f3f550864dea0049c212940460703c85d852089): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:33:27 crc kubenswrapper[4884]: E1128 15:33:27.601712 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(ee3dfa00d99d09099afabae96f3f550864dea0049c212940460703c85d852089): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: E1128 15:33:27.601773 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(ee3dfa00d99d09099afabae96f3f550864dea0049c212940460703c85d852089): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:27 crc kubenswrapper[4884]: E1128 15:33:27.601886 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-nggv8_crc-storage(5d0ecad9-0e8e-4826-b39b-97452fe16dfe)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-nggv8_crc-storage(5d0ecad9-0e8e-4826-b39b-97452fe16dfe)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(ee3dfa00d99d09099afabae96f3f550864dea0049c212940460703c85d852089): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="crc-storage/crc-storage-crc-nggv8" podUID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.653400 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" event={"ID":"987e10e9-a9ff-4280-b62e-0a5b3ae091e3","Type":"ContainerStarted","Data":"cbe149e334b6a13f8e9f41e53f933231ddb7bf68070cf0aef06184543c29cc14"} Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.655209 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.655274 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.721208 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.721335 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" podStartSLOduration=8.721320507 podStartE2EDuration="8.721320507s" podCreationTimestamp="2025-11-28 15:33:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:33:29.718783435 +0000 UTC m=+849.281567236" watchObservedRunningTime="2025-11-28 15:33:29.721320507 +0000 UTC m=+849.284104308" Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.964825 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-nggv8"] Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.964956 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:29 crc kubenswrapper[4884]: I1128 15:33:29.965420 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:30 crc kubenswrapper[4884]: E1128 15:33:30.004122 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(cd703dbd45d7521c319be65f1f822a4bc41eae5a9cb45d2287d74898a6e0f747): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 15:33:30 crc kubenswrapper[4884]: E1128 15:33:30.004187 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(cd703dbd45d7521c319be65f1f822a4bc41eae5a9cb45d2287d74898a6e0f747): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:30 crc kubenswrapper[4884]: E1128 15:33:30.004210 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(cd703dbd45d7521c319be65f1f822a4bc41eae5a9cb45d2287d74898a6e0f747): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:30 crc kubenswrapper[4884]: E1128 15:33:30.004248 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-nggv8_crc-storage(5d0ecad9-0e8e-4826-b39b-97452fe16dfe)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-nggv8_crc-storage(5d0ecad9-0e8e-4826-b39b-97452fe16dfe)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-nggv8_crc-storage_5d0ecad9-0e8e-4826-b39b-97452fe16dfe_0(cd703dbd45d7521c319be65f1f822a4bc41eae5a9cb45d2287d74898a6e0f747): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-nggv8" podUID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" Nov 28 15:33:30 crc kubenswrapper[4884]: I1128 15:33:30.658394 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" Nov 28 15:33:30 crc kubenswrapper[4884]: I1128 15:33:30.694948 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" Nov 28 15:33:41 crc kubenswrapper[4884]: I1128 15:33:41.687289 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:41 crc kubenswrapper[4884]: I1128 15:33:41.688065 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:41 crc kubenswrapper[4884]: I1128 15:33:41.869164 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-nggv8"] Nov 28 15:33:41 crc kubenswrapper[4884]: W1128 15:33:41.874854 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d0ecad9_0e8e_4826_b39b_97452fe16dfe.slice/crio-c623006098effe6a8ca0a9c964b081187be8335f258b750059371cc039f592ba WatchSource:0}: Error finding container c623006098effe6a8ca0a9c964b081187be8335f258b750059371cc039f592ba: Status 404 returned error can't find the container with id c623006098effe6a8ca0a9c964b081187be8335f258b750059371cc039f592ba Nov 28 15:33:41 crc kubenswrapper[4884]: I1128 15:33:41.877426 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:33:42 crc kubenswrapper[4884]: I1128 15:33:42.731755 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nggv8" event={"ID":"5d0ecad9-0e8e-4826-b39b-97452fe16dfe","Type":"ContainerStarted","Data":"c623006098effe6a8ca0a9c964b081187be8335f258b750059371cc039f592ba"} Nov 28 15:33:43 crc kubenswrapper[4884]: I1128 15:33:43.740747 4884 generic.go:334] "Generic (PLEG): container finished" podID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" containerID="9821f0fddf719c8d9786a0fb5f9613a35e429999f62b67b81d57d7708ea679e7" exitCode=0 Nov 28 15:33:43 crc kubenswrapper[4884]: I1128 15:33:43.741067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nggv8" event={"ID":"5d0ecad9-0e8e-4826-b39b-97452fe16dfe","Type":"ContainerDied","Data":"9821f0fddf719c8d9786a0fb5f9613a35e429999f62b67b81d57d7708ea679e7"} Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.004303 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.114255 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28dh7\" (UniqueName: \"kubernetes.io/projected/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-kube-api-access-28dh7\") pod \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.114396 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-node-mnt\") pod \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.114487 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-crc-storage\") pod \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\" (UID: \"5d0ecad9-0e8e-4826-b39b-97452fe16dfe\") " Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.114614 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "5d0ecad9-0e8e-4826-b39b-97452fe16dfe" (UID: "5d0ecad9-0e8e-4826-b39b-97452fe16dfe"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.116035 4884 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.124830 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-kube-api-access-28dh7" (OuterVolumeSpecName: "kube-api-access-28dh7") pod "5d0ecad9-0e8e-4826-b39b-97452fe16dfe" (UID: "5d0ecad9-0e8e-4826-b39b-97452fe16dfe"). InnerVolumeSpecName "kube-api-access-28dh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.141209 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "5d0ecad9-0e8e-4826-b39b-97452fe16dfe" (UID: "5d0ecad9-0e8e-4826-b39b-97452fe16dfe"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.217180 4884 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.217222 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28dh7\" (UniqueName: \"kubernetes.io/projected/5d0ecad9-0e8e-4826-b39b-97452fe16dfe-kube-api-access-28dh7\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.752919 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-nggv8" event={"ID":"5d0ecad9-0e8e-4826-b39b-97452fe16dfe","Type":"ContainerDied","Data":"c623006098effe6a8ca0a9c964b081187be8335f258b750059371cc039f592ba"} Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.752957 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c623006098effe6a8ca0a9c964b081187be8335f258b750059371cc039f592ba" Nov 28 15:33:45 crc kubenswrapper[4884]: I1128 15:33:45.752979 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-nggv8" Nov 28 15:33:51 crc kubenswrapper[4884]: I1128 15:33:51.994692 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7grng" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.339699 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l"] Nov 28 15:33:52 crc kubenswrapper[4884]: E1128 15:33:52.340155 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" containerName="storage" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.340231 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" containerName="storage" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.340367 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" containerName="storage" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.341054 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.343134 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.354199 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l"] Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.418294 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.418340 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gcwz\" (UniqueName: \"kubernetes.io/projected/08c41673-b3b0-4766-ae1a-52953e56772b-kube-api-access-4gcwz\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.418384 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.519491 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.519999 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.520320 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gcwz\" (UniqueName: \"kubernetes.io/projected/08c41673-b3b0-4766-ae1a-52953e56772b-kube-api-access-4gcwz\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.520578 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.520734 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.541639 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gcwz\" (UniqueName: \"kubernetes.io/projected/08c41673-b3b0-4766-ae1a-52953e56772b-kube-api-access-4gcwz\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.657382 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:52 crc kubenswrapper[4884]: I1128 15:33:52.840339 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l"] Nov 28 15:33:53 crc kubenswrapper[4884]: I1128 15:33:53.800874 4884 generic.go:334] "Generic (PLEG): container finished" podID="08c41673-b3b0-4766-ae1a-52953e56772b" containerID="da11116d20e66de3e31ccecc3f125839643ae4c9a5de79dfff050d4d247d486f" exitCode=0 Nov 28 15:33:53 crc kubenswrapper[4884]: I1128 15:33:53.800937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" event={"ID":"08c41673-b3b0-4766-ae1a-52953e56772b","Type":"ContainerDied","Data":"da11116d20e66de3e31ccecc3f125839643ae4c9a5de79dfff050d4d247d486f"} Nov 28 15:33:53 crc kubenswrapper[4884]: I1128 15:33:53.800979 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" event={"ID":"08c41673-b3b0-4766-ae1a-52953e56772b","Type":"ContainerStarted","Data":"ec4b8e65b9f86b469bc2c82c122449c8abca3af60f143089d35afcb5519aef89"} Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.556453 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xgs8d"] Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.559129 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.598755 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xgs8d"] Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.646626 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-utilities\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.646690 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd5th\" (UniqueName: \"kubernetes.io/projected/393a2ba8-e38c-44ba-b0ab-b124bab1c371-kube-api-access-xd5th\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.646723 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-catalog-content\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.748185 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-utilities\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.748276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xd5th\" (UniqueName: \"kubernetes.io/projected/393a2ba8-e38c-44ba-b0ab-b124bab1c371-kube-api-access-xd5th\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.748315 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-catalog-content\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.748810 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-utilities\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.748892 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-catalog-content\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.771320 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xd5th\" (UniqueName: \"kubernetes.io/projected/393a2ba8-e38c-44ba-b0ab-b124bab1c371-kube-api-access-xd5th\") pod \"redhat-operators-xgs8d\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:54 crc kubenswrapper[4884]: I1128 15:33:54.882242 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:33:55 crc kubenswrapper[4884]: I1128 15:33:55.286777 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xgs8d"] Nov 28 15:33:55 crc kubenswrapper[4884]: W1128 15:33:55.292719 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod393a2ba8_e38c_44ba_b0ab_b124bab1c371.slice/crio-a6cea2f4839f2974e0a29ec9a354b868bb3761701c3e2653c806468699b25062 WatchSource:0}: Error finding container a6cea2f4839f2974e0a29ec9a354b868bb3761701c3e2653c806468699b25062: Status 404 returned error can't find the container with id a6cea2f4839f2974e0a29ec9a354b868bb3761701c3e2653c806468699b25062 Nov 28 15:33:55 crc kubenswrapper[4884]: I1128 15:33:55.812153 4884 generic.go:334] "Generic (PLEG): container finished" podID="08c41673-b3b0-4766-ae1a-52953e56772b" containerID="164fb078ec24aed0e3891109e17dd4a85d5910d7302df5d73632248718991a99" exitCode=0 Nov 28 15:33:55 crc kubenswrapper[4884]: I1128 15:33:55.812205 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" event={"ID":"08c41673-b3b0-4766-ae1a-52953e56772b","Type":"ContainerDied","Data":"164fb078ec24aed0e3891109e17dd4a85d5910d7302df5d73632248718991a99"} Nov 28 15:33:55 crc kubenswrapper[4884]: I1128 15:33:55.813475 4884 generic.go:334] "Generic (PLEG): container finished" podID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerID="e71ab8f59d99fc229203f0851c27a3a6eaf2961ebdd4d6a6ead2c7468f59d5b1" exitCode=0 Nov 28 15:33:55 crc kubenswrapper[4884]: I1128 15:33:55.813499 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerDied","Data":"e71ab8f59d99fc229203f0851c27a3a6eaf2961ebdd4d6a6ead2c7468f59d5b1"} Nov 28 15:33:55 crc kubenswrapper[4884]: I1128 15:33:55.813512 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerStarted","Data":"a6cea2f4839f2974e0a29ec9a354b868bb3761701c3e2653c806468699b25062"} Nov 28 15:33:56 crc kubenswrapper[4884]: I1128 15:33:56.821421 4884 generic.go:334] "Generic (PLEG): container finished" podID="08c41673-b3b0-4766-ae1a-52953e56772b" containerID="43ab1ef209b0a638fb16fdfbd76e38ea9347e94522cdba5ed135c624692911de" exitCode=0 Nov 28 15:33:56 crc kubenswrapper[4884]: I1128 15:33:56.821537 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" event={"ID":"08c41673-b3b0-4766-ae1a-52953e56772b","Type":"ContainerDied","Data":"43ab1ef209b0a638fb16fdfbd76e38ea9347e94522cdba5ed135c624692911de"} Nov 28 15:33:56 crc kubenswrapper[4884]: I1128 15:33:56.826254 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" 
event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerStarted","Data":"e215655545aac3a1a9ff21c6879b22ada9518f59aae02568f50a6f11ede795dd"} Nov 28 15:33:57 crc kubenswrapper[4884]: I1128 15:33:57.832723 4884 generic.go:334] "Generic (PLEG): container finished" podID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerID="e215655545aac3a1a9ff21c6879b22ada9518f59aae02568f50a6f11ede795dd" exitCode=0 Nov 28 15:33:57 crc kubenswrapper[4884]: I1128 15:33:57.832797 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerDied","Data":"e215655545aac3a1a9ff21c6879b22ada9518f59aae02568f50a6f11ede795dd"} Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.173122 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.303592 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-util\") pod \"08c41673-b3b0-4766-ae1a-52953e56772b\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.303683 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gcwz\" (UniqueName: \"kubernetes.io/projected/08c41673-b3b0-4766-ae1a-52953e56772b-kube-api-access-4gcwz\") pod \"08c41673-b3b0-4766-ae1a-52953e56772b\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.303728 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-bundle\") pod \"08c41673-b3b0-4766-ae1a-52953e56772b\" (UID: \"08c41673-b3b0-4766-ae1a-52953e56772b\") " Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.304365 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-bundle" (OuterVolumeSpecName: "bundle") pod "08c41673-b3b0-4766-ae1a-52953e56772b" (UID: "08c41673-b3b0-4766-ae1a-52953e56772b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.309228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c41673-b3b0-4766-ae1a-52953e56772b-kube-api-access-4gcwz" (OuterVolumeSpecName: "kube-api-access-4gcwz") pod "08c41673-b3b0-4766-ae1a-52953e56772b" (UID: "08c41673-b3b0-4766-ae1a-52953e56772b"). InnerVolumeSpecName "kube-api-access-4gcwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.324586 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-util" (OuterVolumeSpecName: "util") pod "08c41673-b3b0-4766-ae1a-52953e56772b" (UID: "08c41673-b3b0-4766-ae1a-52953e56772b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.406087 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.406154 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/08c41673-b3b0-4766-ae1a-52953e56772b-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.406174 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gcwz\" (UniqueName: \"kubernetes.io/projected/08c41673-b3b0-4766-ae1a-52953e56772b-kube-api-access-4gcwz\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.843753 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerStarted","Data":"99014b2180ed9cda21909eb52f56a11c8cbd57954861f921b4eecd473ed76580"} Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.847273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" event={"ID":"08c41673-b3b0-4766-ae1a-52953e56772b","Type":"ContainerDied","Data":"ec4b8e65b9f86b469bc2c82c122449c8abca3af60f143089d35afcb5519aef89"} Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.847297 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec4b8e65b9f86b469bc2c82c122449c8abca3af60f143089d35afcb5519aef89" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.847343 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l" Nov 28 15:33:58 crc kubenswrapper[4884]: I1128 15:33:58.864133 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xgs8d" podStartSLOduration=2.1646386890000002 podStartE2EDuration="4.864114416s" podCreationTimestamp="2025-11-28 15:33:54 +0000 UTC" firstStartedPulling="2025-11-28 15:33:55.814450668 +0000 UTC m=+875.377234469" lastFinishedPulling="2025-11-28 15:33:58.513926375 +0000 UTC m=+878.076710196" observedRunningTime="2025-11-28 15:33:58.862700981 +0000 UTC m=+878.425484802" watchObservedRunningTime="2025-11-28 15:33:58.864114416 +0000 UTC m=+878.426898217" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.909312 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr"] Nov 28 15:34:03 crc kubenswrapper[4884]: E1128 15:34:03.909902 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="util" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.909912 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="util" Nov 28 15:34:03 crc kubenswrapper[4884]: E1128 15:34:03.909930 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="pull" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.909935 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="pull" Nov 28 15:34:03 crc kubenswrapper[4884]: E1128 15:34:03.909944 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="extract" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.909950 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="extract" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.910041 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c41673-b3b0-4766-ae1a-52953e56772b" containerName="extract" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.910441 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.912831 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.913116 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-6sqxv" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.913135 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.920159 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr"] Nov 28 15:34:03 crc kubenswrapper[4884]: I1128 15:34:03.977837 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ntgj\" (UniqueName: \"kubernetes.io/projected/da26dcb7-e333-4e51-9b40-8ea6c744048e-kube-api-access-8ntgj\") pod \"nmstate-operator-5b5b58f5c8-k96zr\" (UID: \"da26dcb7-e333-4e51-9b40-8ea6c744048e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.079036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ntgj\" (UniqueName: \"kubernetes.io/projected/da26dcb7-e333-4e51-9b40-8ea6c744048e-kube-api-access-8ntgj\") pod \"nmstate-operator-5b5b58f5c8-k96zr\" (UID: \"da26dcb7-e333-4e51-9b40-8ea6c744048e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.102066 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ntgj\" (UniqueName: \"kubernetes.io/projected/da26dcb7-e333-4e51-9b40-8ea6c744048e-kube-api-access-8ntgj\") pod \"nmstate-operator-5b5b58f5c8-k96zr\" (UID: \"da26dcb7-e333-4e51-9b40-8ea6c744048e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.227052 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.503456 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr"] Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.883179 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.883320 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:34:04 crc kubenswrapper[4884]: I1128 15:34:04.895341 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" event={"ID":"da26dcb7-e333-4e51-9b40-8ea6c744048e","Type":"ContainerStarted","Data":"a55db2bc2e86d121b18078b93c4ed1bf4f4ca7076a90ca834285b3b013e7f651"} Nov 28 15:34:05 crc kubenswrapper[4884]: I1128 15:34:05.919664 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xgs8d" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="registry-server" probeResult="failure" output=< Nov 28 15:34:05 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:34:05 crc kubenswrapper[4884]: > Nov 28 15:34:07 crc kubenswrapper[4884]: I1128 15:34:07.913104 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" event={"ID":"da26dcb7-e333-4e51-9b40-8ea6c744048e","Type":"ContainerStarted","Data":"6e5f95507463cb96d011e1d8e670580e36ed8d3b74e507391c992a65be8e6c00"} Nov 28 15:34:07 crc kubenswrapper[4884]: I1128 15:34:07.936937 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-k96zr" podStartSLOduration=2.296581414 podStartE2EDuration="4.9369159s" podCreationTimestamp="2025-11-28 15:34:03 +0000 UTC" firstStartedPulling="2025-11-28 15:34:04.516784303 +0000 UTC m=+884.079568104" lastFinishedPulling="2025-11-28 15:34:07.157118789 +0000 UTC m=+886.719902590" observedRunningTime="2025-11-28 15:34:07.931679661 +0000 UTC m=+887.494463492" watchObservedRunningTime="2025-11-28 15:34:07.9369159 +0000 UTC m=+887.499699731" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.704368 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.706433 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.710457 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-v7kkz" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.715246 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.729110 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-qpkc4"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.730139 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.734652 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.735565 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.741338 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.765905 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793080 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b46aa294-0c5d-47f8-89a5-08b55bc74c95-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-45zjs\" (UID: \"b46aa294-0c5d-47f8-89a5-08b55bc74c95\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793163 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-ovs-socket\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793223 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcnlx\" (UniqueName: \"kubernetes.io/projected/85a9a43f-4884-4277-af0b-2e7c0d88de32-kube-api-access-tcnlx\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793255 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-nmstate-lock\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793288 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j8zb\" (UniqueName: \"kubernetes.io/projected/b46aa294-0c5d-47f8-89a5-08b55bc74c95-kube-api-access-6j8zb\") pod \"nmstate-webhook-5f6d4c5ccb-45zjs\" (UID: \"b46aa294-0c5d-47f8-89a5-08b55bc74c95\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793329 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhsjf\" (UniqueName: \"kubernetes.io/projected/4deccde2-5e37-4721-83a8-aaeacb1ccbe6-kube-api-access-lhsjf\") pod \"nmstate-metrics-7f946cbc9-hpbq5\" (UID: \"4deccde2-5e37-4721-83a8-aaeacb1ccbe6\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.793355 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: 
\"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-dbus-socket\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.836124 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.836911 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.838674 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.838727 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.838770 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-q6nz2" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.847160 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl"] Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895013 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b46aa294-0c5d-47f8-89a5-08b55bc74c95-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-45zjs\" (UID: \"b46aa294-0c5d-47f8-89a5-08b55bc74c95\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895059 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-ovs-socket\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895166 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/069d1c60-3d48-4f4b-848d-3dd850f26e3a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895220 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/069d1c60-3d48-4f4b-848d-3dd850f26e3a-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895246 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcnlx\" (UniqueName: \"kubernetes.io/projected/85a9a43f-4884-4277-af0b-2e7c0d88de32-kube-api-access-tcnlx\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: 
\"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-nmstate-lock\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895440 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-nmstate-lock\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895379 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-ovs-socket\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895490 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j8zb\" (UniqueName: \"kubernetes.io/projected/b46aa294-0c5d-47f8-89a5-08b55bc74c95-kube-api-access-6j8zb\") pod \"nmstate-webhook-5f6d4c5ccb-45zjs\" (UID: \"b46aa294-0c5d-47f8-89a5-08b55bc74c95\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895552 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-dbus-socket\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895587 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhsjf\" (UniqueName: \"kubernetes.io/projected/4deccde2-5e37-4721-83a8-aaeacb1ccbe6-kube-api-access-lhsjf\") pod \"nmstate-metrics-7f946cbc9-hpbq5\" (UID: \"4deccde2-5e37-4721-83a8-aaeacb1ccbe6\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbkbr\" (UniqueName: \"kubernetes.io/projected/069d1c60-3d48-4f4b-848d-3dd850f26e3a-kube-api-access-hbkbr\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.895805 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/85a9a43f-4884-4277-af0b-2e7c0d88de32-dbus-socket\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.903766 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b46aa294-0c5d-47f8-89a5-08b55bc74c95-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-45zjs\" (UID: \"b46aa294-0c5d-47f8-89a5-08b55bc74c95\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.911512 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcnlx\" (UniqueName: 
\"kubernetes.io/projected/85a9a43f-4884-4277-af0b-2e7c0d88de32-kube-api-access-tcnlx\") pod \"nmstate-handler-qpkc4\" (UID: \"85a9a43f-4884-4277-af0b-2e7c0d88de32\") " pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.915292 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhsjf\" (UniqueName: \"kubernetes.io/projected/4deccde2-5e37-4721-83a8-aaeacb1ccbe6-kube-api-access-lhsjf\") pod \"nmstate-metrics-7f946cbc9-hpbq5\" (UID: \"4deccde2-5e37-4721-83a8-aaeacb1ccbe6\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.915437 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j8zb\" (UniqueName: \"kubernetes.io/projected/b46aa294-0c5d-47f8-89a5-08b55bc74c95-kube-api-access-6j8zb\") pod \"nmstate-webhook-5f6d4c5ccb-45zjs\" (UID: \"b46aa294-0c5d-47f8-89a5-08b55bc74c95\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.996628 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/069d1c60-3d48-4f4b-848d-3dd850f26e3a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.996678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/069d1c60-3d48-4f4b-848d-3dd850f26e3a-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.996718 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbkbr\" (UniqueName: \"kubernetes.io/projected/069d1c60-3d48-4f4b-848d-3dd850f26e3a-kube-api-access-hbkbr\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:12 crc kubenswrapper[4884]: E1128 15:34:12.996930 4884 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 28 15:34:12 crc kubenswrapper[4884]: E1128 15:34:12.997066 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/069d1c60-3d48-4f4b-848d-3dd850f26e3a-plugin-serving-cert podName:069d1c60-3d48-4f4b-848d-3dd850f26e3a nodeName:}" failed. No retries permitted until 2025-11-28 15:34:13.4970383 +0000 UTC m=+893.059822101 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/069d1c60-3d48-4f4b-848d-3dd850f26e3a-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-4nnzl" (UID: "069d1c60-3d48-4f4b-848d-3dd850f26e3a") : secret "plugin-serving-cert" not found Nov 28 15:34:12 crc kubenswrapper[4884]: I1128 15:34:12.997802 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/069d1c60-3d48-4f4b-848d-3dd850f26e3a-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.016010 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbkbr\" (UniqueName: \"kubernetes.io/projected/069d1c60-3d48-4f4b-848d-3dd850f26e3a-kube-api-access-hbkbr\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.022203 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-54bc4c7f48-htk8l"] Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.023339 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.032607 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.039195 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54bc4c7f48-htk8l"] Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.045932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.055728 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098448 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-serving-cert\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098513 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-service-ca\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098578 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-config\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098604 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jk6x\" (UniqueName: \"kubernetes.io/projected/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-kube-api-access-9jk6x\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098637 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-trusted-ca-bundle\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098659 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-oauth-config\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.098791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-oauth-serving-cert\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.202895 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-serving-cert\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.202955 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-service-ca\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.202985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-config\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.203007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jk6x\" (UniqueName: \"kubernetes.io/projected/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-kube-api-access-9jk6x\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.203040 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-trusted-ca-bundle\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.203062 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-oauth-config\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.203099 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-oauth-serving-cert\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.204698 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-oauth-serving-cert\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.206182 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-service-ca\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.206749 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-config\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.207879 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-trusted-ca-bundle\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.215891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-serving-cert\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.233916 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jk6x\" (UniqueName: \"kubernetes.io/projected/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-kube-api-access-9jk6x\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.249731 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4774f147-dae6-43ff-97f2-e1f5ad3abe1a-console-oauth-config\") pod \"console-54bc4c7f48-htk8l\" (UID: \"4774f147-dae6-43ff-97f2-e1f5ad3abe1a\") " pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.329008 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5"] Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.346593 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.417830 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs"] Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.508184 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/069d1c60-3d48-4f4b-848d-3dd850f26e3a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.516942 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/069d1c60-3d48-4f4b-848d-3dd850f26e3a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-4nnzl\" (UID: \"069d1c60-3d48-4f4b-848d-3dd850f26e3a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.567948 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54bc4c7f48-htk8l"] Nov 28 15:34:13 crc kubenswrapper[4884]: W1128 15:34:13.570734 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4774f147_dae6_43ff_97f2_e1f5ad3abe1a.slice/crio-6059cafc75521ad2d6aa2aad7fe4d95485786a51fec8d051881a137d418d0ad7 WatchSource:0}: Error finding container 6059cafc75521ad2d6aa2aad7fe4d95485786a51fec8d051881a137d418d0ad7: Status 404 returned error can't find the container with id 6059cafc75521ad2d6aa2aad7fe4d95485786a51fec8d051881a137d418d0ad7 Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.796872 4884 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.975553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-qpkc4" event={"ID":"85a9a43f-4884-4277-af0b-2e7c0d88de32","Type":"ContainerStarted","Data":"97ae193a42a4ee47fb4980b836d18a1d3a82c5c310a5e3cd83c41e03ebe1c5e2"} Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.981698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" event={"ID":"4deccde2-5e37-4721-83a8-aaeacb1ccbe6","Type":"ContainerStarted","Data":"47df93eeb9ea76c4301d78e4e0ca3f9687e05ef36eade514786bbbbfd3ebc44d"} Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.983057 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54bc4c7f48-htk8l" event={"ID":"4774f147-dae6-43ff-97f2-e1f5ad3abe1a","Type":"ContainerStarted","Data":"6059cafc75521ad2d6aa2aad7fe4d95485786a51fec8d051881a137d418d0ad7"} Nov 28 15:34:13 crc kubenswrapper[4884]: I1128 15:34:13.984908 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" event={"ID":"b46aa294-0c5d-47f8-89a5-08b55bc74c95","Type":"ContainerStarted","Data":"f9aba40ad935c88d9cdedbe03de1257c3e47f6017c5b642a4368a4b2a1e2c829"} Nov 28 15:34:14 crc kubenswrapper[4884]: I1128 15:34:14.128413 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl"] Nov 28 15:34:14 crc kubenswrapper[4884]: W1128 15:34:14.133008 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod069d1c60_3d48_4f4b_848d_3dd850f26e3a.slice/crio-71065cc34aa0de01c3eda6b61b60adf0f039063958db8cf00d4fe424c3f072ac WatchSource:0}: Error finding container 71065cc34aa0de01c3eda6b61b60adf0f039063958db8cf00d4fe424c3f072ac: Status 404 returned error can't find the container with id 71065cc34aa0de01c3eda6b61b60adf0f039063958db8cf00d4fe424c3f072ac Nov 28 15:34:14 crc kubenswrapper[4884]: I1128 15:34:14.951805 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:34:14 crc kubenswrapper[4884]: I1128 15:34:14.992612 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" event={"ID":"069d1c60-3d48-4f4b-848d-3dd850f26e3a","Type":"ContainerStarted","Data":"71065cc34aa0de01c3eda6b61b60adf0f039063958db8cf00d4fe424c3f072ac"} Nov 28 15:34:15 crc kubenswrapper[4884]: I1128 15:34:15.004134 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:34:16 crc kubenswrapper[4884]: I1128 15:34:16.000437 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54bc4c7f48-htk8l" event={"ID":"4774f147-dae6-43ff-97f2-e1f5ad3abe1a","Type":"ContainerStarted","Data":"410e93a2595a71d8cec03aa456033a1acea3c86c0d99cdc3dd63ec5f69956606"} Nov 28 15:34:16 crc kubenswrapper[4884]: I1128 15:34:16.018397 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-54bc4c7f48-htk8l" podStartSLOduration=3.018382802 podStartE2EDuration="3.018382802s" podCreationTimestamp="2025-11-28 15:34:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:34:16.017412248 +0000 UTC m=+895.580196049" watchObservedRunningTime="2025-11-28 15:34:16.018382802 +0000 UTC m=+895.581166603" Nov 28 15:34:17 crc kubenswrapper[4884]: I1128 15:34:17.348467 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xgs8d"] Nov 28 15:34:17 crc kubenswrapper[4884]: I1128 15:34:17.348822 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xgs8d" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="registry-server" containerID="cri-o://99014b2180ed9cda21909eb52f56a11c8cbd57954861f921b4eecd473ed76580" gracePeriod=2 Nov 28 15:34:18 crc kubenswrapper[4884]: I1128 15:34:18.017416 4884 generic.go:334] "Generic (PLEG): container finished" podID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerID="99014b2180ed9cda21909eb52f56a11c8cbd57954861f921b4eecd473ed76580" exitCode=0 Nov 28 15:34:18 crc kubenswrapper[4884]: I1128 15:34:18.017497 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerDied","Data":"99014b2180ed9cda21909eb52f56a11c8cbd57954861f921b4eecd473ed76580"} Nov 28 15:34:19 crc kubenswrapper[4884]: I1128 15:34:19.862124 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:34:19 crc kubenswrapper[4884]: I1128 15:34:19.922206 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-utilities\") pod \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " Nov 28 15:34:19 crc kubenswrapper[4884]: I1128 15:34:19.922322 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xd5th\" (UniqueName: \"kubernetes.io/projected/393a2ba8-e38c-44ba-b0ab-b124bab1c371-kube-api-access-xd5th\") pod \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " Nov 28 15:34:19 crc kubenswrapper[4884]: I1128 15:34:19.922363 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-catalog-content\") pod \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\" (UID: \"393a2ba8-e38c-44ba-b0ab-b124bab1c371\") " Nov 28 15:34:19 crc kubenswrapper[4884]: I1128 15:34:19.923248 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-utilities" (OuterVolumeSpecName: "utilities") pod "393a2ba8-e38c-44ba-b0ab-b124bab1c371" (UID: "393a2ba8-e38c-44ba-b0ab-b124bab1c371"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:19 crc kubenswrapper[4884]: I1128 15:34:19.927453 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/393a2ba8-e38c-44ba-b0ab-b124bab1c371-kube-api-access-xd5th" (OuterVolumeSpecName: "kube-api-access-xd5th") pod "393a2ba8-e38c-44ba-b0ab-b124bab1c371" (UID: "393a2ba8-e38c-44ba-b0ab-b124bab1c371"). InnerVolumeSpecName "kube-api-access-xd5th". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.023375 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xd5th\" (UniqueName: \"kubernetes.io/projected/393a2ba8-e38c-44ba-b0ab-b124bab1c371-kube-api-access-xd5th\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.023410 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.030076 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xgs8d" event={"ID":"393a2ba8-e38c-44ba-b0ab-b124bab1c371","Type":"ContainerDied","Data":"a6cea2f4839f2974e0a29ec9a354b868bb3761701c3e2653c806468699b25062"} Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.030160 4884 scope.go:117] "RemoveContainer" containerID="99014b2180ed9cda21909eb52f56a11c8cbd57954861f921b4eecd473ed76580" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.030175 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xgs8d" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.045549 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "393a2ba8-e38c-44ba-b0ab-b124bab1c371" (UID: "393a2ba8-e38c-44ba-b0ab-b124bab1c371"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.124254 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/393a2ba8-e38c-44ba-b0ab-b124bab1c371-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.361211 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xgs8d"] Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.364193 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xgs8d"] Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.469289 4884 scope.go:117] "RemoveContainer" containerID="e215655545aac3a1a9ff21c6879b22ada9518f59aae02568f50a6f11ede795dd" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.545506 4884 scope.go:117] "RemoveContainer" containerID="e71ab8f59d99fc229203f0851c27a3a6eaf2961ebdd4d6a6ead2c7468f59d5b1" Nov 28 15:34:20 crc kubenswrapper[4884]: I1128 15:34:20.698383 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" path="/var/lib/kubelet/pods/393a2ba8-e38c-44ba-b0ab-b124bab1c371/volumes" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.035876 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" event={"ID":"069d1c60-3d48-4f4b-848d-3dd850f26e3a","Type":"ContainerStarted","Data":"b0a15212f4bc11ab3ed95f3f85e17c9d7496b5764496453461477905f6aff232"} Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.037451 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" 
event={"ID":"b46aa294-0c5d-47f8-89a5-08b55bc74c95","Type":"ContainerStarted","Data":"003a36ca9f1b298915d37bdd7e0e151d7040aa3e90db22aa5de1dbba6c3defa7"} Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.037673 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.039687 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-qpkc4" event={"ID":"85a9a43f-4884-4277-af0b-2e7c0d88de32","Type":"ContainerStarted","Data":"d654bbe87fdadc45860a4872564fa0acfaa7825c5661fc32a371743d6338d1a4"} Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.039897 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.041127 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" event={"ID":"4deccde2-5e37-4721-83a8-aaeacb1ccbe6","Type":"ContainerStarted","Data":"2d322553bdbd0db5772017698dbfdf08b23dc8136df0fc6ba04aa9df71514571"} Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.049013 4884 scope.go:117] "RemoveContainer" containerID="f3401938bf7dbfdd67d846882d2e44239a96c28d4a82bbf37d9d50fd4dc57f2f" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.073841 4884 scope.go:117] "RemoveContainer" containerID="ff6e9389bed8b22e39e4557b0c1702419e813ea6ba355ad83ab8bb9956d1cde5" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.084581 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-4nnzl" podStartSLOduration=2.633638 podStartE2EDuration="9.0845591s" podCreationTimestamp="2025-11-28 15:34:12 +0000 UTC" firstStartedPulling="2025-11-28 15:34:14.135692764 +0000 UTC m=+893.698476565" lastFinishedPulling="2025-11-28 15:34:20.586613854 +0000 UTC m=+900.149397665" observedRunningTime="2025-11-28 15:34:21.062364335 +0000 UTC m=+900.625148136" watchObservedRunningTime="2025-11-28 15:34:21.0845591 +0000 UTC m=+900.647342901" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.087410 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-qpkc4" podStartSLOduration=1.6030548869999999 podStartE2EDuration="9.087400369s" podCreationTimestamp="2025-11-28 15:34:12 +0000 UTC" firstStartedPulling="2025-11-28 15:34:13.082398734 +0000 UTC m=+892.645182525" lastFinishedPulling="2025-11-28 15:34:20.566744166 +0000 UTC m=+900.129528007" observedRunningTime="2025-11-28 15:34:21.083618427 +0000 UTC m=+900.646402258" watchObservedRunningTime="2025-11-28 15:34:21.087400369 +0000 UTC m=+900.650184170" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.106467 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" podStartSLOduration=1.952737896 podStartE2EDuration="9.106440747s" podCreationTimestamp="2025-11-28 15:34:12 +0000 UTC" firstStartedPulling="2025-11-28 15:34:13.433228881 +0000 UTC m=+892.996012672" lastFinishedPulling="2025-11-28 15:34:20.586931692 +0000 UTC m=+900.149715523" observedRunningTime="2025-11-28 15:34:21.10414986 +0000 UTC m=+900.666933671" watchObservedRunningTime="2025-11-28 15:34:21.106440747 +0000 UTC m=+900.669224548" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.114177 4884 scope.go:117] "RemoveContainer" 
containerID="781f0d53204742f2b592ac8b7ce9e8a01016089353c45c10616c080553c36afb" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.147598 4884 scope.go:117] "RemoveContainer" containerID="8bc1c7ca8a998ef63a4ffdca24e240c78bf43ab42959a91f264ae55b3218ff34" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.179332 4884 scope.go:117] "RemoveContainer" containerID="d22993b2ee7b5ee1adea51ceb9d05fb2103a40a3f41510b9b13e9e1e0879b633" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.194880 4884 scope.go:117] "RemoveContainer" containerID="5a039da997ea0a2808b6fffdaed3ad6c234d6a93a0bb8631a8e8f4f0f21d4d0c" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.215271 4884 scope.go:117] "RemoveContainer" containerID="7082f5a2b33206661ed66bc08a42485d0b9f2eb49b2ee5fd7688ed0078da1be8" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.233422 4884 scope.go:117] "RemoveContainer" containerID="e0e1fec741e585e68dcb50d04decc7edfb67f8cb29439f666ba11c0c1be2efdf" Nov 28 15:34:21 crc kubenswrapper[4884]: I1128 15:34:21.246373 4884 scope.go:117] "RemoveContainer" containerID="349ef8c2ea47aa19611405f0c970eaafaba1f176e3a68a0ef100314ee95af8e4" Nov 28 15:34:23 crc kubenswrapper[4884]: I1128 15:34:23.347073 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:23 crc kubenswrapper[4884]: I1128 15:34:23.347570 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:23 crc kubenswrapper[4884]: I1128 15:34:23.355378 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:24 crc kubenswrapper[4884]: I1128 15:34:24.069494 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" event={"ID":"4deccde2-5e37-4721-83a8-aaeacb1ccbe6","Type":"ContainerStarted","Data":"2335924c8224a39d3a14fa809e8c6631314147e3c0a1ffb4856ce6e4c0ed213f"} Nov 28 15:34:24 crc kubenswrapper[4884]: I1128 15:34:24.073828 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-54bc4c7f48-htk8l" Nov 28 15:34:24 crc kubenswrapper[4884]: I1128 15:34:24.095331 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-hpbq5" podStartSLOduration=2.090993979 podStartE2EDuration="12.095312933s" podCreationTimestamp="2025-11-28 15:34:12 +0000 UTC" firstStartedPulling="2025-11-28 15:34:13.340319152 +0000 UTC m=+892.903102953" lastFinishedPulling="2025-11-28 15:34:23.344638106 +0000 UTC m=+902.907421907" observedRunningTime="2025-11-28 15:34:24.091959681 +0000 UTC m=+903.654743492" watchObservedRunningTime="2025-11-28 15:34:24.095312933 +0000 UTC m=+903.658096734" Nov 28 15:34:24 crc kubenswrapper[4884]: I1128 15:34:24.151899 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-tqjn2"] Nov 28 15:34:28 crc kubenswrapper[4884]: I1128 15:34:28.080824 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-qpkc4" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.162133 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w8sst"] Nov 28 15:34:31 crc kubenswrapper[4884]: E1128 15:34:31.162598 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" 
containerName="registry-server" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.162633 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="registry-server" Nov 28 15:34:31 crc kubenswrapper[4884]: E1128 15:34:31.162659 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="extract-utilities" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.162678 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="extract-utilities" Nov 28 15:34:31 crc kubenswrapper[4884]: E1128 15:34:31.162726 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="extract-content" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.162754 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="extract-content" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.163009 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="393a2ba8-e38c-44ba-b0ab-b124bab1c371" containerName="registry-server" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.164638 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.173259 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w8sst"] Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.194762 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-catalog-content\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.194857 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-794sn\" (UniqueName: \"kubernetes.io/projected/acec136d-895f-4c65-a414-c9f9fb14ff78-kube-api-access-794sn\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.194917 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-utilities\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.296075 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-catalog-content\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.296218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-794sn\" (UniqueName: \"kubernetes.io/projected/acec136d-895f-4c65-a414-c9f9fb14ff78-kube-api-access-794sn\") pod \"certified-operators-w8sst\" (UID: 
\"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.296264 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-utilities\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.296491 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-catalog-content\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.296675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-utilities\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.320838 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-794sn\" (UniqueName: \"kubernetes.io/projected/acec136d-895f-4c65-a414-c9f9fb14ff78-kube-api-access-794sn\") pod \"certified-operators-w8sst\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.509282 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:31 crc kubenswrapper[4884]: I1128 15:34:31.946930 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w8sst"] Nov 28 15:34:32 crc kubenswrapper[4884]: I1128 15:34:32.127425 4884 generic.go:334] "Generic (PLEG): container finished" podID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerID="ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1" exitCode=0 Nov 28 15:34:32 crc kubenswrapper[4884]: I1128 15:34:32.127467 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerDied","Data":"ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1"} Nov 28 15:34:32 crc kubenswrapper[4884]: I1128 15:34:32.127492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerStarted","Data":"8be55edc7d7ceb30104072de0acf08bfeeb9ac6f5a95dce75698c19e96b8f654"} Nov 28 15:34:33 crc kubenswrapper[4884]: I1128 15:34:33.066338 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-45zjs" Nov 28 15:34:33 crc kubenswrapper[4884]: I1128 15:34:33.136446 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerStarted","Data":"69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8"} Nov 28 15:34:34 crc kubenswrapper[4884]: I1128 15:34:34.144238 4884 generic.go:334] "Generic (PLEG): container 
finished" podID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerID="69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8" exitCode=0 Nov 28 15:34:34 crc kubenswrapper[4884]: I1128 15:34:34.144354 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerDied","Data":"69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8"} Nov 28 15:34:35 crc kubenswrapper[4884]: I1128 15:34:35.151170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerStarted","Data":"60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5"} Nov 28 15:34:35 crc kubenswrapper[4884]: I1128 15:34:35.175109 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w8sst" podStartSLOduration=1.601564386 podStartE2EDuration="4.175067597s" podCreationTimestamp="2025-11-28 15:34:31 +0000 UTC" firstStartedPulling="2025-11-28 15:34:32.130046572 +0000 UTC m=+911.692830373" lastFinishedPulling="2025-11-28 15:34:34.703549783 +0000 UTC m=+914.266333584" observedRunningTime="2025-11-28 15:34:35.171424613 +0000 UTC m=+914.734208464" watchObservedRunningTime="2025-11-28 15:34:35.175067597 +0000 UTC m=+914.737851398" Nov 28 15:34:36 crc kubenswrapper[4884]: I1128 15:34:36.957908 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c6mh4"] Nov 28 15:34:36 crc kubenswrapper[4884]: I1128 15:34:36.959267 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:36 crc kubenswrapper[4884]: I1128 15:34:36.966505 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c6mh4"] Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.066030 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-utilities\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.066139 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-catalog-content\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.066175 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxxmj\" (UniqueName: \"kubernetes.io/projected/d50c4670-9b1a-45b1-84b6-0f52db838f3f-kube-api-access-kxxmj\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.167972 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-utilities\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " 
pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.168058 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-catalog-content\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.168103 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxxmj\" (UniqueName: \"kubernetes.io/projected/d50c4670-9b1a-45b1-84b6-0f52db838f3f-kube-api-access-kxxmj\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.168602 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-utilities\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.168671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-catalog-content\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.187584 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxxmj\" (UniqueName: \"kubernetes.io/projected/d50c4670-9b1a-45b1-84b6-0f52db838f3f-kube-api-access-kxxmj\") pod \"redhat-marketplace-c6mh4\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.280984 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:37 crc kubenswrapper[4884]: I1128 15:34:37.713053 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c6mh4"] Nov 28 15:34:38 crc kubenswrapper[4884]: I1128 15:34:38.169570 4884 generic.go:334] "Generic (PLEG): container finished" podID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerID="2858e77dcd7f0729108f24cf6e2f6ce7861e8c9a4ff55beee98782ca0f44ec47" exitCode=0 Nov 28 15:34:38 crc kubenswrapper[4884]: I1128 15:34:38.169858 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c6mh4" event={"ID":"d50c4670-9b1a-45b1-84b6-0f52db838f3f","Type":"ContainerDied","Data":"2858e77dcd7f0729108f24cf6e2f6ce7861e8c9a4ff55beee98782ca0f44ec47"} Nov 28 15:34:38 crc kubenswrapper[4884]: I1128 15:34:38.171885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c6mh4" event={"ID":"d50c4670-9b1a-45b1-84b6-0f52db838f3f","Type":"ContainerStarted","Data":"074fd79a7e7f33f6378f519bee8ad4c2f780d87f1099fecfcb2d3a0a80aca26f"} Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.188649 4884 generic.go:334] "Generic (PLEG): container finished" podID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerID="1706c6d9a49f3feefa407d68a38b24575cc6edf34485ebbe852124b5247797bf" exitCode=0 Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.188754 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c6mh4" event={"ID":"d50c4670-9b1a-45b1-84b6-0f52db838f3f","Type":"ContainerDied","Data":"1706c6d9a49f3feefa407d68a38b24575cc6edf34485ebbe852124b5247797bf"} Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.545920 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qpzqd"] Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.547391 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.561713 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qpzqd"] Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.611726 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-catalog-content\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.611773 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmc5g\" (UniqueName: \"kubernetes.io/projected/53e078bd-0818-4f69-bbed-01b1cfeeefd7-kube-api-access-fmc5g\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.611811 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-utilities\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.713631 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-utilities\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.713710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-catalog-content\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.713744 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmc5g\" (UniqueName: \"kubernetes.io/projected/53e078bd-0818-4f69-bbed-01b1cfeeefd7-kube-api-access-fmc5g\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.714477 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-catalog-content\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.714501 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-utilities\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.739643 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fmc5g\" (UniqueName: \"kubernetes.io/projected/53e078bd-0818-4f69-bbed-01b1cfeeefd7-kube-api-access-fmc5g\") pod \"community-operators-qpzqd\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:40 crc kubenswrapper[4884]: I1128 15:34:40.866150 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:41 crc kubenswrapper[4884]: I1128 15:34:41.198419 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c6mh4" event={"ID":"d50c4670-9b1a-45b1-84b6-0f52db838f3f","Type":"ContainerStarted","Data":"8c0fdbe9c6def4c1564a8a1596bb4427729e0ed0099deb4ed967a124f061cd03"} Nov 28 15:34:41 crc kubenswrapper[4884]: I1128 15:34:41.214430 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c6mh4" podStartSLOduration=2.709452594 podStartE2EDuration="5.214417516s" podCreationTimestamp="2025-11-28 15:34:36 +0000 UTC" firstStartedPulling="2025-11-28 15:34:38.171486295 +0000 UTC m=+917.734270106" lastFinishedPulling="2025-11-28 15:34:40.676451227 +0000 UTC m=+920.239235028" observedRunningTime="2025-11-28 15:34:41.212243861 +0000 UTC m=+920.775027662" watchObservedRunningTime="2025-11-28 15:34:41.214417516 +0000 UTC m=+920.777201317" Nov 28 15:34:41 crc kubenswrapper[4884]: I1128 15:34:41.301414 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qpzqd"] Nov 28 15:34:41 crc kubenswrapper[4884]: I1128 15:34:41.509959 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:41 crc kubenswrapper[4884]: I1128 15:34:41.510304 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:41 crc kubenswrapper[4884]: I1128 15:34:41.545518 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:42 crc kubenswrapper[4884]: I1128 15:34:42.209135 4884 generic.go:334] "Generic (PLEG): container finished" podID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerID="fff00de6da35699fd897cbf418b53768e8cce0f6c1fa5c8ae50d8e2b448a5596" exitCode=0 Nov 28 15:34:42 crc kubenswrapper[4884]: I1128 15:34:42.212405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpzqd" event={"ID":"53e078bd-0818-4f69-bbed-01b1cfeeefd7","Type":"ContainerDied","Data":"fff00de6da35699fd897cbf418b53768e8cce0f6c1fa5c8ae50d8e2b448a5596"} Nov 28 15:34:42 crc kubenswrapper[4884]: I1128 15:34:42.212546 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpzqd" event={"ID":"53e078bd-0818-4f69-bbed-01b1cfeeefd7","Type":"ContainerStarted","Data":"06260d6d5e7501b1c706aaf2ffe141b4fdd24d2083d48998733fa5877063bee5"} Nov 28 15:34:42 crc kubenswrapper[4884]: I1128 15:34:42.272416 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:45 crc kubenswrapper[4884]: I1128 15:34:45.231017 4884 generic.go:334] "Generic (PLEG): container finished" podID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerID="5a76d78ee8181df3bca22b873c068a7454f8d828a5122e16f6780b02c04b8ce4" exitCode=0 Nov 28 
15:34:45 crc kubenswrapper[4884]: I1128 15:34:45.231141 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpzqd" event={"ID":"53e078bd-0818-4f69-bbed-01b1cfeeefd7","Type":"ContainerDied","Data":"5a76d78ee8181df3bca22b873c068a7454f8d828a5122e16f6780b02c04b8ce4"} Nov 28 15:34:45 crc kubenswrapper[4884]: I1128 15:34:45.541392 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w8sst"] Nov 28 15:34:45 crc kubenswrapper[4884]: I1128 15:34:45.541907 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w8sst" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="registry-server" containerID="cri-o://60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5" gracePeriod=2 Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.008872 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.109777 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-794sn\" (UniqueName: \"kubernetes.io/projected/acec136d-895f-4c65-a414-c9f9fb14ff78-kube-api-access-794sn\") pod \"acec136d-895f-4c65-a414-c9f9fb14ff78\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.109926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-catalog-content\") pod \"acec136d-895f-4c65-a414-c9f9fb14ff78\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.109961 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-utilities\") pod \"acec136d-895f-4c65-a414-c9f9fb14ff78\" (UID: \"acec136d-895f-4c65-a414-c9f9fb14ff78\") " Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.111228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-utilities" (OuterVolumeSpecName: "utilities") pod "acec136d-895f-4c65-a414-c9f9fb14ff78" (UID: "acec136d-895f-4c65-a414-c9f9fb14ff78"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.116272 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acec136d-895f-4c65-a414-c9f9fb14ff78-kube-api-access-794sn" (OuterVolumeSpecName: "kube-api-access-794sn") pod "acec136d-895f-4c65-a414-c9f9fb14ff78" (UID: "acec136d-895f-4c65-a414-c9f9fb14ff78"). InnerVolumeSpecName "kube-api-access-794sn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.173595 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "acec136d-895f-4c65-a414-c9f9fb14ff78" (UID: "acec136d-895f-4c65-a414-c9f9fb14ff78"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.190674 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw"] Nov 28 15:34:47 crc kubenswrapper[4884]: E1128 15:34:47.190877 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="extract-utilities" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.190888 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="extract-utilities" Nov 28 15:34:47 crc kubenswrapper[4884]: E1128 15:34:47.190903 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="extract-content" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.190909 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="extract-content" Nov 28 15:34:47 crc kubenswrapper[4884]: E1128 15:34:47.190926 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="registry-server" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.190931 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="registry-server" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.191041 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerName="registry-server" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.191756 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.194209 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.205334 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw"] Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.211305 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.211337 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-794sn\" (UniqueName: \"kubernetes.io/projected/acec136d-895f-4c65-a414-c9f9fb14ff78-kube-api-access-794sn\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.211347 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acec136d-895f-4c65-a414-c9f9fb14ff78-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.248300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpzqd" event={"ID":"53e078bd-0818-4f69-bbed-01b1cfeeefd7","Type":"ContainerStarted","Data":"ec734d9c055ecfedc0d3aa30cc15135bcc8f78fc0a0a9b4b67f7bf300e361fb5"} Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.251330 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="acec136d-895f-4c65-a414-c9f9fb14ff78" containerID="60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5" exitCode=0 Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.251375 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerDied","Data":"60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5"} Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.251402 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8sst" event={"ID":"acec136d-895f-4c65-a414-c9f9fb14ff78","Type":"ContainerDied","Data":"8be55edc7d7ceb30104072de0acf08bfeeb9ac6f5a95dce75698c19e96b8f654"} Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.251471 4884 scope.go:117] "RemoveContainer" containerID="60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.251643 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8sst" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.270492 4884 scope.go:117] "RemoveContainer" containerID="69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.271503 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qpzqd" podStartSLOduration=2.93175126 podStartE2EDuration="7.27148953s" podCreationTimestamp="2025-11-28 15:34:40 +0000 UTC" firstStartedPulling="2025-11-28 15:34:42.214604142 +0000 UTC m=+921.777387943" lastFinishedPulling="2025-11-28 15:34:46.554342372 +0000 UTC m=+926.117126213" observedRunningTime="2025-11-28 15:34:47.266837221 +0000 UTC m=+926.829621022" watchObservedRunningTime="2025-11-28 15:34:47.27148953 +0000 UTC m=+926.834273331" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.284368 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.284456 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.287107 4884 scope.go:117] "RemoveContainer" containerID="ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.310223 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w8sst"] Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.311720 4884 scope.go:117] "RemoveContainer" containerID="60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5" Nov 28 15:34:47 crc kubenswrapper[4884]: E1128 15:34:47.312146 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5\": container with ID starting with 60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5 not found: ID does not exist" containerID="60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.312186 4884 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5"} err="failed to get container status \"60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5\": rpc error: code = NotFound desc = could not find container \"60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5\": container with ID starting with 60ed5f9af65bb5fdf3df9bf4001fe2628be539258af4674d017472129c3ad1f5 not found: ID does not exist" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.312212 4884 scope.go:117] "RemoveContainer" containerID="69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.312605 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.312756 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.312844 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzgkd\" (UniqueName: \"kubernetes.io/projected/ac2d8399-8722-44d6-ac8f-57be77e84017-kube-api-access-kzgkd\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: E1128 15:34:47.312623 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8\": container with ID starting with 69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8 not found: ID does not exist" containerID="69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.313040 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8"} err="failed to get container status \"69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8\": rpc error: code = NotFound desc = could not find container \"69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8\": container with ID starting with 69ef4c5a866eee2d154e4989acf303fa576148d6b1c6898d9c2ef0039c910ef8 not found: ID does not exist" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.313138 4884 scope.go:117] "RemoveContainer" containerID="ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1" Nov 28 15:34:47 crc kubenswrapper[4884]: E1128 15:34:47.313716 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1\": container with ID starting with ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1 not found: ID does not exist" containerID="ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.313755 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1"} err="failed to get container status \"ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1\": rpc error: code = NotFound desc = could not find container \"ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1\": container with ID starting with ef2f2febefb9666cb182a19396f97bde40ecf1b1c035a23d9a425d1ad4beaed1 not found: ID does not exist" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.314178 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w8sst"] Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.331882 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.413833 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.413930 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.413953 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzgkd\" (UniqueName: \"kubernetes.io/projected/ac2d8399-8722-44d6-ac8f-57be77e84017-kube-api-access-kzgkd\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.414784 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.414995 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: 
I1128 15:34:47.431376 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzgkd\" (UniqueName: \"kubernetes.io/projected/ac2d8399-8722-44d6-ac8f-57be77e84017-kube-api-access-kzgkd\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.508571 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:47 crc kubenswrapper[4884]: I1128 15:34:47.744911 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw"] Nov 28 15:34:47 crc kubenswrapper[4884]: W1128 15:34:47.750721 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac2d8399_8722_44d6_ac8f_57be77e84017.slice/crio-98db035edd1b06bdfe21311f8d686728ca68acc3acb06b68e443691fd687e504 WatchSource:0}: Error finding container 98db035edd1b06bdfe21311f8d686728ca68acc3acb06b68e443691fd687e504: Status 404 returned error can't find the container with id 98db035edd1b06bdfe21311f8d686728ca68acc3acb06b68e443691fd687e504 Nov 28 15:34:48 crc kubenswrapper[4884]: I1128 15:34:48.261579 4884 generic.go:334] "Generic (PLEG): container finished" podID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerID="d604ddc439dff5daaab5d85d8150ec7c314b864496fb6b1e0bc9a66cfe6092e3" exitCode=0 Nov 28 15:34:48 crc kubenswrapper[4884]: I1128 15:34:48.261643 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" event={"ID":"ac2d8399-8722-44d6-ac8f-57be77e84017","Type":"ContainerDied","Data":"d604ddc439dff5daaab5d85d8150ec7c314b864496fb6b1e0bc9a66cfe6092e3"} Nov 28 15:34:48 crc kubenswrapper[4884]: I1128 15:34:48.261900 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" event={"ID":"ac2d8399-8722-44d6-ac8f-57be77e84017","Type":"ContainerStarted","Data":"98db035edd1b06bdfe21311f8d686728ca68acc3acb06b68e443691fd687e504"} Nov 28 15:34:48 crc kubenswrapper[4884]: I1128 15:34:48.314381 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:48 crc kubenswrapper[4884]: I1128 15:34:48.700269 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acec136d-895f-4c65-a414-c9f9fb14ff78" path="/var/lib/kubelet/pods/acec136d-895f-4c65-a414-c9f9fb14ff78/volumes" Nov 28 15:34:49 crc kubenswrapper[4884]: I1128 15:34:49.195134 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-tqjn2" podUID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" containerName="console" containerID="cri-o://73da5938365cfacc272b1076e251360798b528f2d9347c60d331e90c980511c5" gracePeriod=15 Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.280923 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-tqjn2_fca97aac-84bd-4f0b-93b0-f7a3f641076b/console/0.log" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.281312 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" containerID="73da5938365cfacc272b1076e251360798b528f2d9347c60d331e90c980511c5" exitCode=2 Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.281439 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqjn2" event={"ID":"fca97aac-84bd-4f0b-93b0-f7a3f641076b","Type":"ContainerDied","Data":"73da5938365cfacc272b1076e251360798b528f2d9347c60d331e90c980511c5"} Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.686147 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-tqjn2_fca97aac-84bd-4f0b-93b0-f7a3f641076b/console/0.log" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.686472 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvl8m\" (UniqueName: \"kubernetes.io/projected/fca97aac-84bd-4f0b-93b0-f7a3f641076b-kube-api-access-fvl8m\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864749 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-config\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864771 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-service-ca\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864790 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-oauth-serving-cert\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864816 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-oauth-config\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864863 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-serving-cert\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.864906 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-trusted-ca-bundle\") pod \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\" (UID: \"fca97aac-84bd-4f0b-93b0-f7a3f641076b\") " Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.865510 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.865562 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-service-ca" (OuterVolumeSpecName: "service-ca") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.865632 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.865756 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-config" (OuterVolumeSpecName: "console-config") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.866396 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.866622 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.870531 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fca97aac-84bd-4f0b-93b0-f7a3f641076b-kube-api-access-fvl8m" (OuterVolumeSpecName: "kube-api-access-fvl8m") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "kube-api-access-fvl8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.871218 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.871499 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fca97aac-84bd-4f0b-93b0-f7a3f641076b" (UID: "fca97aac-84bd-4f0b-93b0-f7a3f641076b"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.942435 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966127 4884 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966155 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966164 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvl8m\" (UniqueName: \"kubernetes.io/projected/fca97aac-84bd-4f0b-93b0-f7a3f641076b-kube-api-access-fvl8m\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966174 4884 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966258 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966269 4884 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fca97aac-84bd-4f0b-93b0-f7a3f641076b-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:50 crc kubenswrapper[4884]: I1128 15:34:50.966277 4884 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fca97aac-84bd-4f0b-93b0-f7a3f641076b-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.243560 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.243654 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.289378 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-tqjn2_fca97aac-84bd-4f0b-93b0-f7a3f641076b/console/0.log" Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.289503 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-tqjn2" Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.290187 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqjn2" event={"ID":"fca97aac-84bd-4f0b-93b0-f7a3f641076b","Type":"ContainerDied","Data":"6befeb37e618bb0773ce4b2a7a5e696613ef60023894c9341ddcbd2ec28bd02d"} Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.290244 4884 scope.go:117] "RemoveContainer" containerID="73da5938365cfacc272b1076e251360798b528f2d9347c60d331e90c980511c5" Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.293332 4884 generic.go:334] "Generic (PLEG): container finished" podID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerID="7e63a81f9e89389c50bb5e54607194149000310343de1fe4c39046c6622315e3" exitCode=0 Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.293377 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" event={"ID":"ac2d8399-8722-44d6-ac8f-57be77e84017","Type":"ContainerDied","Data":"7e63a81f9e89389c50bb5e54607194149000310343de1fe4c39046c6622315e3"} Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.343167 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-tqjn2"] Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.350230 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-tqjn2"] Nov 28 15:34:51 crc kubenswrapper[4884]: I1128 15:34:51.351053 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:52 crc kubenswrapper[4884]: I1128 15:34:52.305326 4884 generic.go:334] "Generic (PLEG): container finished" podID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerID="122c276f0de58f6d909020b8cc3029baa5bd32f9da64160c64688f66022599dd" exitCode=0 Nov 28 15:34:52 crc kubenswrapper[4884]: I1128 15:34:52.305465 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" event={"ID":"ac2d8399-8722-44d6-ac8f-57be77e84017","Type":"ContainerDied","Data":"122c276f0de58f6d909020b8cc3029baa5bd32f9da64160c64688f66022599dd"} Nov 28 15:34:52 crc kubenswrapper[4884]: I1128 15:34:52.701135 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" path="/var/lib/kubelet/pods/fca97aac-84bd-4f0b-93b0-f7a3f641076b/volumes" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.588556 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.612291 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzgkd\" (UniqueName: \"kubernetes.io/projected/ac2d8399-8722-44d6-ac8f-57be77e84017-kube-api-access-kzgkd\") pod \"ac2d8399-8722-44d6-ac8f-57be77e84017\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.612361 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-bundle\") pod \"ac2d8399-8722-44d6-ac8f-57be77e84017\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.612391 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-util\") pod \"ac2d8399-8722-44d6-ac8f-57be77e84017\" (UID: \"ac2d8399-8722-44d6-ac8f-57be77e84017\") " Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.615540 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-bundle" (OuterVolumeSpecName: "bundle") pod "ac2d8399-8722-44d6-ac8f-57be77e84017" (UID: "ac2d8399-8722-44d6-ac8f-57be77e84017"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.618042 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac2d8399-8722-44d6-ac8f-57be77e84017-kube-api-access-kzgkd" (OuterVolumeSpecName: "kube-api-access-kzgkd") pod "ac2d8399-8722-44d6-ac8f-57be77e84017" (UID: "ac2d8399-8722-44d6-ac8f-57be77e84017"). InnerVolumeSpecName "kube-api-access-kzgkd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.687450 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-util" (OuterVolumeSpecName: "util") pod "ac2d8399-8722-44d6-ac8f-57be77e84017" (UID: "ac2d8399-8722-44d6-ac8f-57be77e84017"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.714327 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzgkd\" (UniqueName: \"kubernetes.io/projected/ac2d8399-8722-44d6-ac8f-57be77e84017-kube-api-access-kzgkd\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.714376 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:53 crc kubenswrapper[4884]: I1128 15:34:53.714392 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ac2d8399-8722-44d6-ac8f-57be77e84017-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:54 crc kubenswrapper[4884]: I1128 15:34:54.322472 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" event={"ID":"ac2d8399-8722-44d6-ac8f-57be77e84017","Type":"ContainerDied","Data":"98db035edd1b06bdfe21311f8d686728ca68acc3acb06b68e443691fd687e504"} Nov 28 15:34:54 crc kubenswrapper[4884]: I1128 15:34:54.322621 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98db035edd1b06bdfe21311f8d686728ca68acc3acb06b68e443691fd687e504" Nov 28 15:34:54 crc kubenswrapper[4884]: I1128 15:34:54.322539 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw" Nov 28 15:34:54 crc kubenswrapper[4884]: I1128 15:34:54.738321 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c6mh4"] Nov 28 15:34:54 crc kubenswrapper[4884]: I1128 15:34:54.738546 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c6mh4" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="registry-server" containerID="cri-o://8c0fdbe9c6def4c1564a8a1596bb4427729e0ed0099deb4ed967a124f061cd03" gracePeriod=2 Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.336012 4884 generic.go:334] "Generic (PLEG): container finished" podID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerID="8c0fdbe9c6def4c1564a8a1596bb4427729e0ed0099deb4ed967a124f061cd03" exitCode=0 Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.336076 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c6mh4" event={"ID":"d50c4670-9b1a-45b1-84b6-0f52db838f3f","Type":"ContainerDied","Data":"8c0fdbe9c6def4c1564a8a1596bb4427729e0ed0099deb4ed967a124f061cd03"} Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.638791 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.839872 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-catalog-content\") pod \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.839951 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-utilities\") pod \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.840071 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxxmj\" (UniqueName: \"kubernetes.io/projected/d50c4670-9b1a-45b1-84b6-0f52db838f3f-kube-api-access-kxxmj\") pod \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\" (UID: \"d50c4670-9b1a-45b1-84b6-0f52db838f3f\") " Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.841671 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-utilities" (OuterVolumeSpecName: "utilities") pod "d50c4670-9b1a-45b1-84b6-0f52db838f3f" (UID: "d50c4670-9b1a-45b1-84b6-0f52db838f3f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.847310 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d50c4670-9b1a-45b1-84b6-0f52db838f3f-kube-api-access-kxxmj" (OuterVolumeSpecName: "kube-api-access-kxxmj") pod "d50c4670-9b1a-45b1-84b6-0f52db838f3f" (UID: "d50c4670-9b1a-45b1-84b6-0f52db838f3f"). InnerVolumeSpecName "kube-api-access-kxxmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.858323 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d50c4670-9b1a-45b1-84b6-0f52db838f3f" (UID: "d50c4670-9b1a-45b1-84b6-0f52db838f3f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.942049 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.942106 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50c4670-9b1a-45b1-84b6-0f52db838f3f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:55 crc kubenswrapper[4884]: I1128 15:34:55.942117 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxxmj\" (UniqueName: \"kubernetes.io/projected/d50c4670-9b1a-45b1-84b6-0f52db838f3f-kube-api-access-kxxmj\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.345493 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c6mh4" event={"ID":"d50c4670-9b1a-45b1-84b6-0f52db838f3f","Type":"ContainerDied","Data":"074fd79a7e7f33f6378f519bee8ad4c2f780d87f1099fecfcb2d3a0a80aca26f"} Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.345558 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c6mh4" Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.345573 4884 scope.go:117] "RemoveContainer" containerID="8c0fdbe9c6def4c1564a8a1596bb4427729e0ed0099deb4ed967a124f061cd03" Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.378853 4884 scope.go:117] "RemoveContainer" containerID="1706c6d9a49f3feefa407d68a38b24575cc6edf34485ebbe852124b5247797bf" Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.399715 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c6mh4"] Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.403054 4884 scope.go:117] "RemoveContainer" containerID="2858e77dcd7f0729108f24cf6e2f6ce7861e8c9a4ff55beee98782ca0f44ec47" Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.405842 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c6mh4"] Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.703217 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" path="/var/lib/kubelet/pods/d50c4670-9b1a-45b1-84b6-0f52db838f3f/volumes" Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.940552 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qpzqd"] Nov 28 15:34:56 crc kubenswrapper[4884]: I1128 15:34:56.940765 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qpzqd" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="registry-server" containerID="cri-o://ec734d9c055ecfedc0d3aa30cc15135bcc8f78fc0a0a9b4b67f7bf300e361fb5" gracePeriod=2 Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.381159 4884 generic.go:334] "Generic (PLEG): container finished" podID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerID="ec734d9c055ecfedc0d3aa30cc15135bcc8f78fc0a0a9b4b67f7bf300e361fb5" exitCode=0 Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.381239 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpzqd" 
event={"ID":"53e078bd-0818-4f69-bbed-01b1cfeeefd7","Type":"ContainerDied","Data":"ec734d9c055ecfedc0d3aa30cc15135bcc8f78fc0a0a9b4b67f7bf300e361fb5"} Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.492051 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.599207 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-catalog-content\") pod \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.599279 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-utilities\") pod \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.599311 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmc5g\" (UniqueName: \"kubernetes.io/projected/53e078bd-0818-4f69-bbed-01b1cfeeefd7-kube-api-access-fmc5g\") pod \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\" (UID: \"53e078bd-0818-4f69-bbed-01b1cfeeefd7\") " Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.600130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-utilities" (OuterVolumeSpecName: "utilities") pod "53e078bd-0818-4f69-bbed-01b1cfeeefd7" (UID: "53e078bd-0818-4f69-bbed-01b1cfeeefd7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.612225 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53e078bd-0818-4f69-bbed-01b1cfeeefd7-kube-api-access-fmc5g" (OuterVolumeSpecName: "kube-api-access-fmc5g") pod "53e078bd-0818-4f69-bbed-01b1cfeeefd7" (UID: "53e078bd-0818-4f69-bbed-01b1cfeeefd7"). InnerVolumeSpecName "kube-api-access-fmc5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.661682 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "53e078bd-0818-4f69-bbed-01b1cfeeefd7" (UID: "53e078bd-0818-4f69-bbed-01b1cfeeefd7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.701841 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.701883 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53e078bd-0818-4f69-bbed-01b1cfeeefd7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:58 crc kubenswrapper[4884]: I1128 15:34:58.701898 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmc5g\" (UniqueName: \"kubernetes.io/projected/53e078bd-0818-4f69-bbed-01b1cfeeefd7-kube-api-access-fmc5g\") on node \"crc\" DevicePath \"\"" Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.393236 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qpzqd" event={"ID":"53e078bd-0818-4f69-bbed-01b1cfeeefd7","Type":"ContainerDied","Data":"06260d6d5e7501b1c706aaf2ffe141b4fdd24d2083d48998733fa5877063bee5"} Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.393540 4884 scope.go:117] "RemoveContainer" containerID="ec734d9c055ecfedc0d3aa30cc15135bcc8f78fc0a0a9b4b67f7bf300e361fb5" Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.393326 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qpzqd" Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.414648 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qpzqd"] Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.416099 4884 scope.go:117] "RemoveContainer" containerID="5a76d78ee8181df3bca22b873c068a7454f8d828a5122e16f6780b02c04b8ce4" Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.419073 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qpzqd"] Nov 28 15:34:59 crc kubenswrapper[4884]: I1128 15:34:59.428445 4884 scope.go:117] "RemoveContainer" containerID="fff00de6da35699fd897cbf418b53768e8cce0f6c1fa5c8ae50d8e2b448a5596" Nov 28 15:35:00 crc kubenswrapper[4884]: I1128 15:35:00.697840 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" path="/var/lib/kubelet/pods/53e078bd-0818-4f69-bbed-01b1cfeeefd7/volumes" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.561589 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-74588d6766-r92s8"] Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562014 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" containerName="console" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562026 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" containerName="console" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562039 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="registry-server" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562045 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="registry-server" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562053 4884 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="extract-utilities" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562060 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="extract-utilities" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562069 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="extract-content" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562074 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="extract-content" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562083 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="registry-server" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562105 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="registry-server" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562115 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="util" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562120 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="util" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562129 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="extract" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562135 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="extract" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562141 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="extract-utilities" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562147 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="extract-utilities" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562155 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="pull" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562160 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="pull" Nov 28 15:35:03 crc kubenswrapper[4884]: E1128 15:35:03.562169 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="extract-content" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562176 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="extract-content" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562273 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac2d8399-8722-44d6-ac8f-57be77e84017" containerName="extract" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562284 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="53e078bd-0818-4f69-bbed-01b1cfeeefd7" containerName="registry-server" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562293 4884 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="d50c4670-9b1a-45b1-84b6-0f52db838f3f" containerName="registry-server" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562302 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fca97aac-84bd-4f0b-93b0-f7a3f641076b" containerName="console" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.562628 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.565451 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.566140 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.567315 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.570118 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.570775 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-trqp5" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.603456 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-74588d6766-r92s8"] Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.668381 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f1162fe6-2514-476c-b62c-ce0e88c06488-webhook-cert\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.668456 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f1162fe6-2514-476c-b62c-ce0e88c06488-apiservice-cert\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.668492 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qnhx\" (UniqueName: \"kubernetes.io/projected/f1162fe6-2514-476c-b62c-ce0e88c06488-kube-api-access-9qnhx\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.769641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f1162fe6-2514-476c-b62c-ce0e88c06488-webhook-cert\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.769733 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f1162fe6-2514-476c-b62c-ce0e88c06488-apiservice-cert\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.769771 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qnhx\" (UniqueName: \"kubernetes.io/projected/f1162fe6-2514-476c-b62c-ce0e88c06488-kube-api-access-9qnhx\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.776607 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f1162fe6-2514-476c-b62c-ce0e88c06488-apiservice-cert\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.786680 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f1162fe6-2514-476c-b62c-ce0e88c06488-webhook-cert\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.787307 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qnhx\" (UniqueName: \"kubernetes.io/projected/f1162fe6-2514-476c-b62c-ce0e88c06488-kube-api-access-9qnhx\") pod \"metallb-operator-controller-manager-74588d6766-r92s8\" (UID: \"f1162fe6-2514-476c-b62c-ce0e88c06488\") " pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.880677 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.897058 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n"] Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.897758 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.906853 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-bg4bx" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.907201 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.907204 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.908525 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n"] Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.971393 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8j55\" (UniqueName: \"kubernetes.io/projected/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-kube-api-access-k8j55\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.971459 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-apiservice-cert\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:03 crc kubenswrapper[4884]: I1128 15:35:03.971628 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-webhook-cert\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.073983 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8j55\" (UniqueName: \"kubernetes.io/projected/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-kube-api-access-k8j55\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.074036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-apiservice-cert\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.074056 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-webhook-cert\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.082938 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-webhook-cert\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.083618 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-apiservice-cert\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.096322 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8j55\" (UniqueName: \"kubernetes.io/projected/40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf-kube-api-access-k8j55\") pod \"metallb-operator-webhook-server-8c64fc959-7s75n\" (UID: \"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf\") " pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.130915 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-74588d6766-r92s8"] Nov 28 15:35:04 crc kubenswrapper[4884]: W1128 15:35:04.136343 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1162fe6_2514_476c_b62c_ce0e88c06488.slice/crio-5d7ca8e5a25989dbbdfae896060b5a421997be24fa3c41da6950e21da5d3f8a1 WatchSource:0}: Error finding container 5d7ca8e5a25989dbbdfae896060b5a421997be24fa3c41da6950e21da5d3f8a1: Status 404 returned error can't find the container with id 5d7ca8e5a25989dbbdfae896060b5a421997be24fa3c41da6950e21da5d3f8a1 Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.247799 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.420840 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" event={"ID":"f1162fe6-2514-476c-b62c-ce0e88c06488","Type":"ContainerStarted","Data":"5d7ca8e5a25989dbbdfae896060b5a421997be24fa3c41da6950e21da5d3f8a1"} Nov 28 15:35:04 crc kubenswrapper[4884]: I1128 15:35:04.695845 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n"] Nov 28 15:35:04 crc kubenswrapper[4884]: W1128 15:35:04.705342 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40f0f89b_3ee3_42ac_a5c3_3e3f5f042bdf.slice/crio-9d36dbeb72c05c6dc8aaaa3c48bc88b7838c79ad912fac25371059b9c6d9e47a WatchSource:0}: Error finding container 9d36dbeb72c05c6dc8aaaa3c48bc88b7838c79ad912fac25371059b9c6d9e47a: Status 404 returned error can't find the container with id 9d36dbeb72c05c6dc8aaaa3c48bc88b7838c79ad912fac25371059b9c6d9e47a Nov 28 15:35:05 crc kubenswrapper[4884]: I1128 15:35:05.427525 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" event={"ID":"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf","Type":"ContainerStarted","Data":"9d36dbeb72c05c6dc8aaaa3c48bc88b7838c79ad912fac25371059b9c6d9e47a"} Nov 28 15:35:10 crc kubenswrapper[4884]: I1128 15:35:10.474822 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" event={"ID":"f1162fe6-2514-476c-b62c-ce0e88c06488","Type":"ContainerStarted","Data":"4f33f263b3a446a9cbd3c521bcdbdd1e0247b9fca99ca03d31249115dc973fa2"} Nov 28 15:35:10 crc kubenswrapper[4884]: I1128 15:35:10.475751 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:10 crc kubenswrapper[4884]: I1128 15:35:10.477076 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" event={"ID":"40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf","Type":"ContainerStarted","Data":"ad045cd7c08fdc3ade1c77ac550ebcff207bb2e6a274d495cf57efae4670590a"} Nov 28 15:35:10 crc kubenswrapper[4884]: I1128 15:35:10.477741 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:10 crc kubenswrapper[4884]: I1128 15:35:10.496547 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" podStartSLOduration=2.207700297 podStartE2EDuration="7.49652929s" podCreationTimestamp="2025-11-28 15:35:03 +0000 UTC" firstStartedPulling="2025-11-28 15:35:04.139457655 +0000 UTC m=+943.702241456" lastFinishedPulling="2025-11-28 15:35:09.428286648 +0000 UTC m=+948.991070449" observedRunningTime="2025-11-28 15:35:10.492188408 +0000 UTC m=+950.054972229" watchObservedRunningTime="2025-11-28 15:35:10.49652929 +0000 UTC m=+950.059313091" Nov 28 15:35:10 crc kubenswrapper[4884]: I1128 15:35:10.510826 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" podStartSLOduration=2.761654246 podStartE2EDuration="7.510804446s" podCreationTimestamp="2025-11-28 
15:35:03 +0000 UTC" firstStartedPulling="2025-11-28 15:35:04.708197004 +0000 UTC m=+944.270980805" lastFinishedPulling="2025-11-28 15:35:09.457347204 +0000 UTC m=+949.020131005" observedRunningTime="2025-11-28 15:35:10.510591401 +0000 UTC m=+950.073375202" watchObservedRunningTime="2025-11-28 15:35:10.510804446 +0000 UTC m=+950.073588257" Nov 28 15:35:21 crc kubenswrapper[4884]: I1128 15:35:21.242762 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:35:21 crc kubenswrapper[4884]: I1128 15:35:21.243242 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:35:24 crc kubenswrapper[4884]: I1128 15:35:24.255959 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-8c64fc959-7s75n" Nov 28 15:35:43 crc kubenswrapper[4884]: I1128 15:35:43.884171 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-74588d6766-r92s8" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.585532 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-8ct25"] Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.588036 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.590957 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.592574 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-8cmlw" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.595554 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.606269 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74"] Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.614334 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.618372 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.637643 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74"] Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.697226 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-7vzh9"] Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.697989 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.698542 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-qcdlm"] Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699224 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-metrics\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699259 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-conf\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7mtz\" (UniqueName: \"kubernetes.io/projected/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-kube-api-access-p7mtz\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699321 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-metrics-certs\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699329 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699335 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-sockets\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699351 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-startup\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-reloader\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.699698 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.700040 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.700077 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.701813 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-whvbm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.701942 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.708198 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-qcdlm"] Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.800967 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-conf\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801023 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801043 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-metrics-certs\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801146 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzdgh\" (UniqueName: 
\"kubernetes.io/projected/7919a63a-0933-4ed1-a9e7-3235165603b9-kube-api-access-nzdgh\") pod \"frr-k8s-webhook-server-7fcb986d4-mmq74\" (UID: \"7919a63a-0933-4ed1-a9e7-3235165603b9\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801183 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7mtz\" (UniqueName: \"kubernetes.io/projected/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-kube-api-access-p7mtz\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801201 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-metrics-certs\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-sockets\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801234 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-cert\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801248 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/84cf5151-9588-4977-ac9b-c629f14f95c4-metallb-excludel2\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801321 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-startup\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801352 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7919a63a-0933-4ed1-a9e7-3235165603b9-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-mmq74\" (UID: \"7919a63a-0933-4ed1-a9e7-3235165603b9\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801470 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-conf\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801937 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-sockets\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " 
pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.801995 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfn7r\" (UniqueName: \"kubernetes.io/projected/723c2bb6-327d-4a9c-8590-03fad405f0e9-kube-api-access-bfn7r\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802023 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-frr-startup\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802110 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-reloader\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vclk\" (UniqueName: \"kubernetes.io/projected/84cf5151-9588-4977-ac9b-c629f14f95c4-kube-api-access-5vclk\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-metrics\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802182 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-metrics-certs\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802360 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-reloader\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.802408 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-metrics\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.822791 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-metrics-certs\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.829704 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7mtz\" (UniqueName: 
\"kubernetes.io/projected/80b94e5d-88d1-435d-8cb8-35c0e2fab7dd-kube-api-access-p7mtz\") pod \"frr-k8s-8ct25\" (UID: \"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd\") " pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903281 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzdgh\" (UniqueName: \"kubernetes.io/projected/7919a63a-0933-4ed1-a9e7-3235165603b9-kube-api-access-nzdgh\") pod \"frr-k8s-webhook-server-7fcb986d4-mmq74\" (UID: \"7919a63a-0933-4ed1-a9e7-3235165603b9\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903338 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-cert\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903355 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/84cf5151-9588-4977-ac9b-c629f14f95c4-metallb-excludel2\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903373 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7919a63a-0933-4ed1-a9e7-3235165603b9-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-mmq74\" (UID: \"7919a63a-0933-4ed1-a9e7-3235165603b9\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903389 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfn7r\" (UniqueName: \"kubernetes.io/projected/723c2bb6-327d-4a9c-8590-03fad405f0e9-kube-api-access-bfn7r\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903416 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vclk\" (UniqueName: \"kubernetes.io/projected/84cf5151-9588-4977-ac9b-c629f14f95c4-kube-api-access-5vclk\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903454 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-metrics-certs\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903486 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.903505 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-metrics-certs\") pod \"controller-f8648f98b-qcdlm\" (UID: 
\"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: E1128 15:35:44.903608 4884 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 28 15:35:44 crc kubenswrapper[4884]: E1128 15:35:44.903654 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-metrics-certs podName:723c2bb6-327d-4a9c-8590-03fad405f0e9 nodeName:}" failed. No retries permitted until 2025-11-28 15:35:45.40363938 +0000 UTC m=+984.966423171 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-metrics-certs") pod "controller-f8648f98b-qcdlm" (UID: "723c2bb6-327d-4a9c-8590-03fad405f0e9") : secret "controller-certs-secret" not found Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.904695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/84cf5151-9588-4977-ac9b-c629f14f95c4-metallb-excludel2\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: E1128 15:35:44.905161 4884 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 15:35:44 crc kubenswrapper[4884]: E1128 15:35:44.905197 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist podName:84cf5151-9588-4977-ac9b-c629f14f95c4 nodeName:}" failed. No retries permitted until 2025-11-28 15:35:45.405187419 +0000 UTC m=+984.967971220 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist") pod "speaker-7vzh9" (UID: "84cf5151-9588-4977-ac9b-c629f14f95c4") : secret "metallb-memberlist" not found Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.907277 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.907955 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-metrics-certs\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.909294 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7919a63a-0933-4ed1-a9e7-3235165603b9-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-mmq74\" (UID: \"7919a63a-0933-4ed1-a9e7-3235165603b9\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.917475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-cert\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.921245 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfn7r\" (UniqueName: \"kubernetes.io/projected/723c2bb6-327d-4a9c-8590-03fad405f0e9-kube-api-access-bfn7r\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.922514 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzdgh\" (UniqueName: \"kubernetes.io/projected/7919a63a-0933-4ed1-a9e7-3235165603b9-kube-api-access-nzdgh\") pod \"frr-k8s-webhook-server-7fcb986d4-mmq74\" (UID: \"7919a63a-0933-4ed1-a9e7-3235165603b9\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.925347 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vclk\" (UniqueName: \"kubernetes.io/projected/84cf5151-9588-4977-ac9b-c629f14f95c4-kube-api-access-5vclk\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.935327 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:44 crc kubenswrapper[4884]: I1128 15:35:44.940392 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.192904 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74"] Nov 28 15:35:45 crc kubenswrapper[4884]: W1128 15:35:45.195921 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7919a63a_0933_4ed1_a9e7_3235165603b9.slice/crio-5590b2063e87efaf9756fedd0941e9115316de6f81b1dfb48a12f443ce3d6046 WatchSource:0}: Error finding container 5590b2063e87efaf9756fedd0941e9115316de6f81b1dfb48a12f443ce3d6046: Status 404 returned error can't find the container with id 5590b2063e87efaf9756fedd0941e9115316de6f81b1dfb48a12f443ce3d6046 Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.409988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.410041 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-metrics-certs\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:45 crc kubenswrapper[4884]: E1128 15:35:45.410686 4884 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 15:35:45 crc kubenswrapper[4884]: E1128 15:35:45.410756 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist podName:84cf5151-9588-4977-ac9b-c629f14f95c4 nodeName:}" failed. No retries permitted until 2025-11-28 15:35:46.410738523 +0000 UTC m=+985.973522324 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist") pod "speaker-7vzh9" (UID: "84cf5151-9588-4977-ac9b-c629f14f95c4") : secret "metallb-memberlist" not found Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.415854 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/723c2bb6-327d-4a9c-8590-03fad405f0e9-metrics-certs\") pod \"controller-f8648f98b-qcdlm\" (UID: \"723c2bb6-327d-4a9c-8590-03fad405f0e9\") " pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.639083 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.733848 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" event={"ID":"7919a63a-0933-4ed1-a9e7-3235165603b9","Type":"ContainerStarted","Data":"5590b2063e87efaf9756fedd0941e9115316de6f81b1dfb48a12f443ce3d6046"} Nov 28 15:35:45 crc kubenswrapper[4884]: I1128 15:35:45.882560 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-qcdlm"] Nov 28 15:35:45 crc kubenswrapper[4884]: W1128 15:35:45.889706 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod723c2bb6_327d_4a9c_8590_03fad405f0e9.slice/crio-2732e4d4076f236fe730abaf0a207cfe5d737fe6fa23b20029d4b84c814fae28 WatchSource:0}: Error finding container 2732e4d4076f236fe730abaf0a207cfe5d737fe6fa23b20029d4b84c814fae28: Status 404 returned error can't find the container with id 2732e4d4076f236fe730abaf0a207cfe5d737fe6fa23b20029d4b84c814fae28 Nov 28 15:35:46 crc kubenswrapper[4884]: I1128 15:35:46.422349 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:46 crc kubenswrapper[4884]: E1128 15:35:46.422514 4884 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 15:35:46 crc kubenswrapper[4884]: E1128 15:35:46.422578 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist podName:84cf5151-9588-4977-ac9b-c629f14f95c4 nodeName:}" failed. No retries permitted until 2025-11-28 15:35:48.422560601 +0000 UTC m=+987.985344402 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist") pod "speaker-7vzh9" (UID: "84cf5151-9588-4977-ac9b-c629f14f95c4") : secret "metallb-memberlist" not found Nov 28 15:35:46 crc kubenswrapper[4884]: I1128 15:35:46.741694 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-qcdlm" event={"ID":"723c2bb6-327d-4a9c-8590-03fad405f0e9","Type":"ContainerStarted","Data":"2732e4d4076f236fe730abaf0a207cfe5d737fe6fa23b20029d4b84c814fae28"} Nov 28 15:35:46 crc kubenswrapper[4884]: I1128 15:35:46.742733 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"6861d7d4cf72aaeaaf02320b24462dcfd0174617e3c75cd77e6806820945b222"} Nov 28 15:35:47 crc kubenswrapper[4884]: I1128 15:35:47.748918 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-qcdlm" event={"ID":"723c2bb6-327d-4a9c-8590-03fad405f0e9","Type":"ContainerStarted","Data":"78c374c5421c247f18c4f9472fec490cdcc2e80054241bdcc28eec9b24b2d8a5"} Nov 28 15:35:47 crc kubenswrapper[4884]: I1128 15:35:47.749270 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-qcdlm" event={"ID":"723c2bb6-327d-4a9c-8590-03fad405f0e9","Type":"ContainerStarted","Data":"3373c2671b50aea8ac1ab8fd8809579893891dfafbe24d56ec8848dda96fbca4"} Nov 28 15:35:47 crc kubenswrapper[4884]: I1128 15:35:47.749328 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-qcdlm" Nov 28 15:35:47 crc kubenswrapper[4884]: I1128 15:35:47.774245 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-qcdlm" podStartSLOduration=3.774227588 podStartE2EDuration="3.774227588s" podCreationTimestamp="2025-11-28 15:35:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:35:47.767934462 +0000 UTC m=+987.330718263" watchObservedRunningTime="2025-11-28 15:35:47.774227588 +0000 UTC m=+987.337011389" Nov 28 15:35:48 crc kubenswrapper[4884]: I1128 15:35:48.467882 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:48 crc kubenswrapper[4884]: I1128 15:35:48.475702 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/84cf5151-9588-4977-ac9b-c629f14f95c4-memberlist\") pod \"speaker-7vzh9\" (UID: \"84cf5151-9588-4977-ac9b-c629f14f95c4\") " pod="metallb-system/speaker-7vzh9" Nov 28 15:35:48 crc kubenswrapper[4884]: I1128 15:35:48.631601 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-7vzh9" Nov 28 15:35:48 crc kubenswrapper[4884]: W1128 15:35:48.663286 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84cf5151_9588_4977_ac9b_c629f14f95c4.slice/crio-9e9558d8ae3993f90c88ea1e60acdaced1f842067a8f1a2f2f18e32b6ea54776 WatchSource:0}: Error finding container 9e9558d8ae3993f90c88ea1e60acdaced1f842067a8f1a2f2f18e32b6ea54776: Status 404 returned error can't find the container with id 9e9558d8ae3993f90c88ea1e60acdaced1f842067a8f1a2f2f18e32b6ea54776 Nov 28 15:35:48 crc kubenswrapper[4884]: I1128 15:35:48.759611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7vzh9" event={"ID":"84cf5151-9588-4977-ac9b-c629f14f95c4","Type":"ContainerStarted","Data":"9e9558d8ae3993f90c88ea1e60acdaced1f842067a8f1a2f2f18e32b6ea54776"} Nov 28 15:35:49 crc kubenswrapper[4884]: I1128 15:35:49.766363 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7vzh9" event={"ID":"84cf5151-9588-4977-ac9b-c629f14f95c4","Type":"ContainerStarted","Data":"1069971d8c7a3dd2e152c10e164e7257a147211cb5888c2f4c5fae0f9e63d0d4"} Nov 28 15:35:49 crc kubenswrapper[4884]: I1128 15:35:49.766735 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-7vzh9" event={"ID":"84cf5151-9588-4977-ac9b-c629f14f95c4","Type":"ContainerStarted","Data":"f2dc2e147d3f6d6aafa56fb029a8a3714d01a37a3a886bb62dcee57c263bcaa3"} Nov 28 15:35:49 crc kubenswrapper[4884]: I1128 15:35:49.766780 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-7vzh9" Nov 28 15:35:49 crc kubenswrapper[4884]: I1128 15:35:49.782802 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-7vzh9" podStartSLOduration=5.782787389 podStartE2EDuration="5.782787389s" podCreationTimestamp="2025-11-28 15:35:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:35:49.781671971 +0000 UTC m=+989.344455762" watchObservedRunningTime="2025-11-28 15:35:49.782787389 +0000 UTC m=+989.345571190" Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.242581 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.242972 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.243019 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.243613 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"df509a7285ba0f4de67851edf0f5010eb933a0baadb76ebfdc9cd205fbf9037b"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.243667 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://df509a7285ba0f4de67851edf0f5010eb933a0baadb76ebfdc9cd205fbf9037b" gracePeriod=600 Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.780161 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="df509a7285ba0f4de67851edf0f5010eb933a0baadb76ebfdc9cd205fbf9037b" exitCode=0 Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.780211 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"df509a7285ba0f4de67851edf0f5010eb933a0baadb76ebfdc9cd205fbf9037b"} Nov 28 15:35:51 crc kubenswrapper[4884]: I1128 15:35:51.780246 4884 scope.go:117] "RemoveContainer" containerID="50e385994cb5d4fdd4e12c220735d7ad7864ce7ac7e374e05fb6cf32e28d143e" Nov 28 15:35:53 crc kubenswrapper[4884]: I1128 15:35:53.798499 4884 generic.go:334] "Generic (PLEG): container finished" podID="80b94e5d-88d1-435d-8cb8-35c0e2fab7dd" containerID="eb68d29a7339c8eb910ac4681c5b5178a1247ebad35246884d691ef3644b0f95" exitCode=0 Nov 28 15:35:53 crc kubenswrapper[4884]: I1128 15:35:53.799186 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerDied","Data":"eb68d29a7339c8eb910ac4681c5b5178a1247ebad35246884d691ef3644b0f95"} Nov 28 15:35:53 crc kubenswrapper[4884]: I1128 15:35:53.805901 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" event={"ID":"7919a63a-0933-4ed1-a9e7-3235165603b9","Type":"ContainerStarted","Data":"56b5b0dc9022b2a5215bbee3cd55047b988e64ae6b40f9c1465b95b2e07866be"} Nov 28 15:35:53 crc kubenswrapper[4884]: I1128 15:35:53.806118 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" Nov 28 15:35:53 crc kubenswrapper[4884]: I1128 15:35:53.809281 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"c832312ab3e30450a9cf06ba49e8c224ee46755e5134c05566d8c182c4c44cfc"} Nov 28 15:35:53 crc kubenswrapper[4884]: I1128 15:35:53.886279 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74" podStartSLOduration=2.203479283 podStartE2EDuration="9.88626293s" podCreationTimestamp="2025-11-28 15:35:44 +0000 UTC" firstStartedPulling="2025-11-28 15:35:45.198170227 +0000 UTC m=+984.760954028" lastFinishedPulling="2025-11-28 15:35:52.880953864 +0000 UTC m=+992.443737675" observedRunningTime="2025-11-28 15:35:53.88299067 +0000 UTC m=+993.445774471" watchObservedRunningTime="2025-11-28 15:35:53.88626293 +0000 UTC m=+993.449046731" Nov 28 15:35:54 crc kubenswrapper[4884]: I1128 15:35:54.817000 4884 generic.go:334] "Generic (PLEG): container finished" podID="80b94e5d-88d1-435d-8cb8-35c0e2fab7dd" containerID="6af3c520f6cf6a668b42e7bacdda71d41280648126c43b65565e3864df55f0e4" 
exitCode=0 Nov 28 15:35:54 crc kubenswrapper[4884]: I1128 15:35:54.817051 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerDied","Data":"6af3c520f6cf6a668b42e7bacdda71d41280648126c43b65565e3864df55f0e4"} Nov 28 15:35:55 crc kubenswrapper[4884]: I1128 15:35:55.825139 4884 generic.go:334] "Generic (PLEG): container finished" podID="80b94e5d-88d1-435d-8cb8-35c0e2fab7dd" containerID="d036e40248caef24faa2e3d63f11eb62355eb7566fbe5ab67eca391fe1cd67f2" exitCode=0 Nov 28 15:35:55 crc kubenswrapper[4884]: I1128 15:35:55.825635 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerDied","Data":"d036e40248caef24faa2e3d63f11eb62355eb7566fbe5ab67eca391fe1cd67f2"} Nov 28 15:35:56 crc kubenswrapper[4884]: I1128 15:35:56.837626 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"7ffb48ef3325dbb2624657b0a524967c7b361f33247c5a3a0d5e0c9b6562b2fe"} Nov 28 15:35:56 crc kubenswrapper[4884]: I1128 15:35:56.837673 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"0d18a3cba1b9ee51f88fbb29139e563b8e602e964c90d83b3b5b59383747d99f"} Nov 28 15:35:56 crc kubenswrapper[4884]: I1128 15:35:56.837687 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"c9bae9328e93ce9413db45b51fd6b26c8b4fe21d90b3606fed22c630b0535eab"} Nov 28 15:35:56 crc kubenswrapper[4884]: I1128 15:35:56.837699 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"709a446944647fdb047f4a17aa0ac56fe5d5e253e175048e84b2bff4b5fcd6e9"} Nov 28 15:35:56 crc kubenswrapper[4884]: I1128 15:35:56.837711 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"e25a5d3c7231d77a8aab295169eb64e67eca2b08b88c65174c8cd5a2819bc98d"} Nov 28 15:35:57 crc kubenswrapper[4884]: I1128 15:35:57.848594 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-8ct25" event={"ID":"80b94e5d-88d1-435d-8cb8-35c0e2fab7dd","Type":"ContainerStarted","Data":"6c9000c8fc18825dbea02864e62e4952f467017b0cb8a334898630963772ce92"} Nov 28 15:35:57 crc kubenswrapper[4884]: I1128 15:35:57.848935 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-8ct25" Nov 28 15:35:57 crc kubenswrapper[4884]: I1128 15:35:57.871153 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-8ct25" podStartSLOduration=7.70257218 podStartE2EDuration="13.871135594s" podCreationTimestamp="2025-11-28 15:35:44 +0000 UTC" firstStartedPulling="2025-11-28 15:35:46.730786407 +0000 UTC m=+986.293570208" lastFinishedPulling="2025-11-28 15:35:52.899349811 +0000 UTC m=+992.462133622" observedRunningTime="2025-11-28 15:35:57.869212747 +0000 UTC m=+997.431996558" watchObservedRunningTime="2025-11-28 15:35:57.871135594 +0000 UTC m=+997.433919385" Nov 28 15:35:58 crc kubenswrapper[4884]: I1128 15:35:58.636596 4884 
Nov 28 15:35:58 crc kubenswrapper[4884]: I1128 15:35:58.636596 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-7vzh9"
Nov 28 15:35:59 crc kubenswrapper[4884]: I1128 15:35:59.936390 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-8ct25"
Nov 28 15:35:59 crc kubenswrapper[4884]: I1128 15:35:59.982514 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-8ct25"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.025500 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"]
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.026908 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.030333 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.041837 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"]
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.133541 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcpt2\" (UniqueName: \"kubernetes.io/projected/b7ff8f64-84f8-4079-9a59-dfd65a750348-kube-api-access-mcpt2\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.133604 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.133641 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.235497 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcpt2\" (UniqueName: \"kubernetes.io/projected/b7ff8f64-84f8-4079-9a59-dfd65a750348-kube-api-access-mcpt2\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
\"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.235625 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.236304 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.236353 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.253976 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcpt2\" (UniqueName: \"kubernetes.io/projected/b7ff8f64-84f8-4079-9a59-dfd65a750348-kube-api-access-mcpt2\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.342914 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.342914 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.548707 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"]
Nov 28 15:36:00 crc kubenswrapper[4884]: I1128 15:36:00.921310 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" event={"ID":"b7ff8f64-84f8-4079-9a59-dfd65a750348","Type":"ContainerStarted","Data":"3b5b332365a02e87214958013528f87347348e70adec1eeb2bf60f95c85e0498"}
Nov 28 15:36:02 crc kubenswrapper[4884]: I1128 15:36:02.936838 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerID="1eefbc857063aaee093c630eabf0a2cdd4285164b2a535add899c580673dd5d8" exitCode=0
Nov 28 15:36:02 crc kubenswrapper[4884]: I1128 15:36:02.936897 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" event={"ID":"b7ff8f64-84f8-4079-9a59-dfd65a750348","Type":"ContainerDied","Data":"1eefbc857063aaee093c630eabf0a2cdd4285164b2a535add899c580673dd5d8"}
Nov 28 15:36:04 crc kubenswrapper[4884]: I1128 15:36:04.953119 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-mmq74"
Nov 28 15:36:05 crc kubenswrapper[4884]: I1128 15:36:05.643320 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-qcdlm"
Nov 28 15:36:08 crc kubenswrapper[4884]: I1128 15:36:08.981258 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" event={"ID":"b7ff8f64-84f8-4079-9a59-dfd65a750348","Type":"ContainerStarted","Data":"3036fb6e6576ca2ad38b4700b8873bc274c64563dce39bc92c7f1ee18a478a25"}
Nov 28 15:36:09 crc kubenswrapper[4884]: I1128 15:36:09.988171 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerID="3036fb6e6576ca2ad38b4700b8873bc274c64563dce39bc92c7f1ee18a478a25" exitCode=0
Nov 28 15:36:09 crc kubenswrapper[4884]: I1128 15:36:09.988226 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" event={"ID":"b7ff8f64-84f8-4079-9a59-dfd65a750348","Type":"ContainerDied","Data":"3036fb6e6576ca2ad38b4700b8873bc274c64563dce39bc92c7f1ee18a478a25"}
Nov 28 15:36:10 crc kubenswrapper[4884]: I1128 15:36:10.997268 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerID="c8003e0517449f000960ba4ceb2e1d300eec954a559cc0ba0d6f0387f1e92105" exitCode=0
Nov 28 15:36:10 crc kubenswrapper[4884]: I1128 15:36:10.997310 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" event={"ID":"b7ff8f64-84f8-4079-9a59-dfd65a750348","Type":"ContainerDied","Data":"c8003e0517449f000960ba4ceb2e1d300eec954a559cc0ba0d6f0387f1e92105"}
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.329538 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.493469 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcpt2\" (UniqueName: \"kubernetes.io/projected/b7ff8f64-84f8-4079-9a59-dfd65a750348-kube-api-access-mcpt2\") pod \"b7ff8f64-84f8-4079-9a59-dfd65a750348\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") "
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.493821 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-util\") pod \"b7ff8f64-84f8-4079-9a59-dfd65a750348\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") "
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.493930 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-bundle\") pod \"b7ff8f64-84f8-4079-9a59-dfd65a750348\" (UID: \"b7ff8f64-84f8-4079-9a59-dfd65a750348\") "
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.495177 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-bundle" (OuterVolumeSpecName: "bundle") pod "b7ff8f64-84f8-4079-9a59-dfd65a750348" (UID: "b7ff8f64-84f8-4079-9a59-dfd65a750348"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.500046 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7ff8f64-84f8-4079-9a59-dfd65a750348-kube-api-access-mcpt2" (OuterVolumeSpecName: "kube-api-access-mcpt2") pod "b7ff8f64-84f8-4079-9a59-dfd65a750348" (UID: "b7ff8f64-84f8-4079-9a59-dfd65a750348"). InnerVolumeSpecName "kube-api-access-mcpt2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.506930 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-util" (OuterVolumeSpecName: "util") pod "b7ff8f64-84f8-4079-9a59-dfd65a750348" (UID: "b7ff8f64-84f8-4079-9a59-dfd65a750348"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.595363 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.595411 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcpt2\" (UniqueName: \"kubernetes.io/projected/b7ff8f64-84f8-4079-9a59-dfd65a750348-kube-api-access-mcpt2\") on node \"crc\" DevicePath \"\""
Nov 28 15:36:12 crc kubenswrapper[4884]: I1128 15:36:12.595427 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b7ff8f64-84f8-4079-9a59-dfd65a750348-util\") on node \"crc\" DevicePath \"\""
Nov 28 15:36:13 crc kubenswrapper[4884]: I1128 15:36:13.020396 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6" event={"ID":"b7ff8f64-84f8-4079-9a59-dfd65a750348","Type":"ContainerDied","Data":"3b5b332365a02e87214958013528f87347348e70adec1eeb2bf60f95c85e0498"}
Nov 28 15:36:13 crc kubenswrapper[4884]: I1128 15:36:13.020434 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b5b332365a02e87214958013528f87347348e70adec1eeb2bf60f95c85e0498"
Nov 28 15:36:13 crc kubenswrapper[4884]: I1128 15:36:13.020887 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6"
Nov 28 15:36:14 crc kubenswrapper[4884]: I1128 15:36:14.941255 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-8ct25"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.337363 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"]
Nov 28 15:36:18 crc kubenswrapper[4884]: E1128 15:36:18.337983 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="util"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.337999 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="util"
Nov 28 15:36:18 crc kubenswrapper[4884]: E1128 15:36:18.338010 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="pull"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.338017 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="pull"
Nov 28 15:36:18 crc kubenswrapper[4884]: E1128 15:36:18.338044 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="extract"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.338052 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="extract"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.338200 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7ff8f64-84f8-4079-9a59-dfd65a750348" containerName="extract"
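[editor's note] The cpu_manager/memory_manager "RemoveStaleState" entries above show per-container resource bookkeeping for the finished bundle pod (its util, pull and extract containers) being purged when the next pod is admitted. A rough sketch of that idea only, not the kubelet's actual data structures:

package main

import "fmt"

// Hypothetical model of the bookkeeping behind "RemoveStaleState: removing
// container" / "Deleted CPUSet assignment": assignments keyed by
// (podUID, containerName) are dropped for pods the kubelet no longer runs.
type key struct{ podUID, container string }

func removeStaleState(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments {
		if !activePods[k.podUID] {
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"b7ff8f64-84f8-4079-9a59-dfd65a750348", "util"}:    "0-3",
		{"b7ff8f64-84f8-4079-9a59-dfd65a750348", "pull"}:    "0-3",
		{"b7ff8f64-84f8-4079-9a59-dfd65a750348", "extract"}: "0-3",
	}
	removeStaleState(assignments, map[string]bool{}) // the bundle pod is gone
	fmt.Println(len(assignments))                    // 0
}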
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.338750 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.341071 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.341147 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.341284 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-jq79c"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.402448 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"]
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.468329 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/93427932-d4f2-4283-89d6-bb4bfc09f381-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-m7ldm\" (UID: \"93427932-d4f2-4283-89d6-bb4bfc09f381\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.468425 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c427b\" (UniqueName: \"kubernetes.io/projected/93427932-d4f2-4283-89d6-bb4bfc09f381-kube-api-access-c427b\") pod \"cert-manager-operator-controller-manager-64cf6dff88-m7ldm\" (UID: \"93427932-d4f2-4283-89d6-bb4bfc09f381\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.569781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/93427932-d4f2-4283-89d6-bb4bfc09f381-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-m7ldm\" (UID: \"93427932-d4f2-4283-89d6-bb4bfc09f381\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.570569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c427b\" (UniqueName: \"kubernetes.io/projected/93427932-d4f2-4283-89d6-bb4bfc09f381-kube-api-access-c427b\") pod \"cert-manager-operator-controller-manager-64cf6dff88-m7ldm\" (UID: \"93427932-d4f2-4283-89d6-bb4bfc09f381\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"
Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.571362 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/93427932-d4f2-4283-89d6-bb4bfc09f381-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-m7ldm\" (UID: \"93427932-d4f2-4283-89d6-bb4bfc09f381\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm" Nov 28 15:36:18 crc kubenswrapper[4884]: I1128 15:36:18.657169 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm" Nov 28 15:36:19 crc kubenswrapper[4884]: I1128 15:36:19.114266 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm"] Nov 28 15:36:19 crc kubenswrapper[4884]: W1128 15:36:19.118542 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93427932_d4f2_4283_89d6_bb4bfc09f381.slice/crio-fcbcaf5d568d2e06cbfc82a5ef1670f006a55361d789f76a0d048895b57470b1 WatchSource:0}: Error finding container fcbcaf5d568d2e06cbfc82a5ef1670f006a55361d789f76a0d048895b57470b1: Status 404 returned error can't find the container with id fcbcaf5d568d2e06cbfc82a5ef1670f006a55361d789f76a0d048895b57470b1 Nov 28 15:36:20 crc kubenswrapper[4884]: I1128 15:36:20.063296 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm" event={"ID":"93427932-d4f2-4283-89d6-bb4bfc09f381","Type":"ContainerStarted","Data":"fcbcaf5d568d2e06cbfc82a5ef1670f006a55361d789f76a0d048895b57470b1"} Nov 28 15:36:27 crc kubenswrapper[4884]: I1128 15:36:27.191032 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm" event={"ID":"93427932-d4f2-4283-89d6-bb4bfc09f381","Type":"ContainerStarted","Data":"db9f8fb5ba192f2539633e4b4648d27ca3222f5a8aadf5016dd76ecbcfa55a7f"} Nov 28 15:36:27 crc kubenswrapper[4884]: I1128 15:36:27.210029 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-m7ldm" podStartSLOduration=1.357060559 podStartE2EDuration="9.210012071s" podCreationTimestamp="2025-11-28 15:36:18 +0000 UTC" firstStartedPulling="2025-11-28 15:36:19.119878823 +0000 UTC m=+1018.682662624" lastFinishedPulling="2025-11-28 15:36:26.972830335 +0000 UTC m=+1026.535614136" observedRunningTime="2025-11-28 15:36:27.208901394 +0000 UTC m=+1026.771685195" watchObservedRunningTime="2025-11-28 15:36:27.210012071 +0000 UTC m=+1026.772795882" Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.511550 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"] Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.513477 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.513477 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.515612 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.515803 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-bgz87"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.516164 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.532300 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"]
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.651890 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6267b76-4b35-49cd-a61e-51906e210463-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-7jj8r\" (UID: \"e6267b76-4b35-49cd-a61e-51906e210463\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.651962 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gzgp\" (UniqueName: \"kubernetes.io/projected/e6267b76-4b35-49cd-a61e-51906e210463-kube-api-access-5gzgp\") pod \"cert-manager-webhook-f4fb5df64-7jj8r\" (UID: \"e6267b76-4b35-49cd-a61e-51906e210463\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.752981 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gzgp\" (UniqueName: \"kubernetes.io/projected/e6267b76-4b35-49cd-a61e-51906e210463-kube-api-access-5gzgp\") pod \"cert-manager-webhook-f4fb5df64-7jj8r\" (UID: \"e6267b76-4b35-49cd-a61e-51906e210463\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.753188 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6267b76-4b35-49cd-a61e-51906e210463-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-7jj8r\" (UID: \"e6267b76-4b35-49cd-a61e-51906e210463\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.771627 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6267b76-4b35-49cd-a61e-51906e210463-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-7jj8r\" (UID: \"e6267b76-4b35-49cd-a61e-51906e210463\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.771787 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gzgp\" (UniqueName: \"kubernetes.io/projected/e6267b76-4b35-49cd-a61e-51906e210463-kube-api-access-5gzgp\") pod \"cert-manager-webhook-f4fb5df64-7jj8r\" (UID: \"e6267b76-4b35-49cd-a61e-51906e210463\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:30 crc kubenswrapper[4884]: I1128 15:36:30.829875 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:31 crc kubenswrapper[4884]: I1128 15:36:31.114960 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"]
Nov 28 15:36:31 crc kubenswrapper[4884]: I1128 15:36:31.217891 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r" event={"ID":"e6267b76-4b35-49cd-a61e-51906e210463","Type":"ContainerStarted","Data":"a9bc77055197549110a7310698ca5fb04ef890370e0654108e5b92f27f661b79"}
Nov 28 15:36:31 crc kubenswrapper[4884]: I1128 15:36:31.902807 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"]
Nov 28 15:36:31 crc kubenswrapper[4884]: I1128 15:36:31.903790 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:31 crc kubenswrapper[4884]: I1128 15:36:31.906381 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-7pnc8"
Nov 28 15:36:31 crc kubenswrapper[4884]: I1128 15:36:31.914630 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"]
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.072688 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jj7q\" (UniqueName: \"kubernetes.io/projected/5d300312-c908-4667-a71f-f1d6e3279dbd-kube-api-access-8jj7q\") pod \"cert-manager-cainjector-855d9ccff4-r6cqc\" (UID: \"5d300312-c908-4667-a71f-f1d6e3279dbd\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.072756 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5d300312-c908-4667-a71f-f1d6e3279dbd-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-r6cqc\" (UID: \"5d300312-c908-4667-a71f-f1d6e3279dbd\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.174157 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5d300312-c908-4667-a71f-f1d6e3279dbd-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-r6cqc\" (UID: \"5d300312-c908-4667-a71f-f1d6e3279dbd\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.174461 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jj7q\" (UniqueName: \"kubernetes.io/projected/5d300312-c908-4667-a71f-f1d6e3279dbd-kube-api-access-8jj7q\") pod \"cert-manager-cainjector-855d9ccff4-r6cqc\" (UID: \"5d300312-c908-4667-a71f-f1d6e3279dbd\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.194062 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5d300312-c908-4667-a71f-f1d6e3279dbd-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-r6cqc\" (UID: \"5d300312-c908-4667-a71f-f1d6e3279dbd\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.203251 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jj7q\" (UniqueName: \"kubernetes.io/projected/5d300312-c908-4667-a71f-f1d6e3279dbd-kube-api-access-8jj7q\") pod \"cert-manager-cainjector-855d9ccff4-r6cqc\" (UID: \"5d300312-c908-4667-a71f-f1d6e3279dbd\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.219988 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"
Nov 28 15:36:32 crc kubenswrapper[4884]: I1128 15:36:32.701939 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc"]
Nov 28 15:36:33 crc kubenswrapper[4884]: I1128 15:36:33.237430 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc" event={"ID":"5d300312-c908-4667-a71f-f1d6e3279dbd","Type":"ContainerStarted","Data":"4b0a2171669133465c638182ba629abd510a21bab6f3ac994531eac71920770d"}
Nov 28 15:36:41 crc kubenswrapper[4884]: I1128 15:36:41.294126 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc" event={"ID":"5d300312-c908-4667-a71f-f1d6e3279dbd","Type":"ContainerStarted","Data":"94878d07df78896aeaee0e590279edb6fc8a8c5d0dfaa2ef162bbab886cad7c0"}
Nov 28 15:36:41 crc kubenswrapper[4884]: I1128 15:36:41.296489 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r" event={"ID":"e6267b76-4b35-49cd-a61e-51906e210463","Type":"ContainerStarted","Data":"5ccb0c36707294f71b5f1f0ac7b8ab09917785cc871b10f12be1613a653e0fc2"}
Nov 28 15:36:41 crc kubenswrapper[4884]: I1128 15:36:41.296693 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:41 crc kubenswrapper[4884]: I1128 15:36:41.362349 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-r6cqc" podStartSLOduration=2.858437172 podStartE2EDuration="10.362324827s" podCreationTimestamp="2025-11-28 15:36:31 +0000 UTC" firstStartedPulling="2025-11-28 15:36:32.716389788 +0000 UTC m=+1032.279173589" lastFinishedPulling="2025-11-28 15:36:40.220277443 +0000 UTC m=+1039.783061244" observedRunningTime="2025-11-28 15:36:41.318427426 +0000 UTC m=+1040.881211237" watchObservedRunningTime="2025-11-28 15:36:41.362324827 +0000 UTC m=+1040.925108648"
Nov 28 15:36:41 crc kubenswrapper[4884]: I1128 15:36:41.363542 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r" podStartSLOduration=2.284692663 podStartE2EDuration="11.363531587s" podCreationTimestamp="2025-11-28 15:36:30 +0000 UTC" firstStartedPulling="2025-11-28 15:36:31.125119246 +0000 UTC m=+1030.687903047" lastFinishedPulling="2025-11-28 15:36:40.20395817 +0000 UTC m=+1039.766741971" observedRunningTime="2025-11-28 15:36:41.361144037 +0000 UTC m=+1040.923927868" watchObservedRunningTime="2025-11-28 15:36:41.363531587 +0000 UTC m=+1040.926315398"
Nov 28 15:36:45 crc kubenswrapper[4884]: I1128 15:36:45.834041 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-7jj8r"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.402113 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-h88rg"]
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.403233 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.404835 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-9q87b"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.416872 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-h88rg"]
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.438583 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfc5r\" (UniqueName: \"kubernetes.io/projected/58d06855-5e34-4170-afba-4c4266ff7e35-kube-api-access-nfc5r\") pod \"cert-manager-86cb77c54b-h88rg\" (UID: \"58d06855-5e34-4170-afba-4c4266ff7e35\") " pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.438697 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/58d06855-5e34-4170-afba-4c4266ff7e35-bound-sa-token\") pod \"cert-manager-86cb77c54b-h88rg\" (UID: \"58d06855-5e34-4170-afba-4c4266ff7e35\") " pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.539905 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/58d06855-5e34-4170-afba-4c4266ff7e35-bound-sa-token\") pod \"cert-manager-86cb77c54b-h88rg\" (UID: \"58d06855-5e34-4170-afba-4c4266ff7e35\") " pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.539991 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfc5r\" (UniqueName: \"kubernetes.io/projected/58d06855-5e34-4170-afba-4c4266ff7e35-kube-api-access-nfc5r\") pod \"cert-manager-86cb77c54b-h88rg\" (UID: \"58d06855-5e34-4170-afba-4c4266ff7e35\") " pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.558898 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/58d06855-5e34-4170-afba-4c4266ff7e35-bound-sa-token\") pod \"cert-manager-86cb77c54b-h88rg\" (UID: \"58d06855-5e34-4170-afba-4c4266ff7e35\") " pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.559393 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfc5r\" (UniqueName: \"kubernetes.io/projected/58d06855-5e34-4170-afba-4c4266ff7e35-kube-api-access-nfc5r\") pod \"cert-manager-86cb77c54b-h88rg\" (UID: \"58d06855-5e34-4170-afba-4c4266ff7e35\") " pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:49 crc kubenswrapper[4884]: I1128 15:36:49.720801 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-h88rg"
Nov 28 15:36:50 crc kubenswrapper[4884]: I1128 15:36:50.196321 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-h88rg"]
Nov 28 15:36:50 crc kubenswrapper[4884]: I1128 15:36:50.356551 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-h88rg" event={"ID":"58d06855-5e34-4170-afba-4c4266ff7e35","Type":"ContainerStarted","Data":"818f92da17eee607f9067f6046e281fd8abbd4d049149f3d903a42cafb045fce"}
Nov 28 15:36:51 crc kubenswrapper[4884]: I1128 15:36:51.364427 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-h88rg" event={"ID":"58d06855-5e34-4170-afba-4c4266ff7e35","Type":"ContainerStarted","Data":"e405f5e51acb3db250307dfd7e72beafe5fae26924c26254dc2724ad09b78101"}
Nov 28 15:36:51 crc kubenswrapper[4884]: I1128 15:36:51.381709 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-h88rg" podStartSLOduration=2.381689389 podStartE2EDuration="2.381689389s" podCreationTimestamp="2025-11-28 15:36:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:36:51.37974518 +0000 UTC m=+1050.942529011" watchObservedRunningTime="2025-11-28 15:36:51.381689389 +0000 UTC m=+1050.944473200"
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.254264 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-9rgx2"]
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.255712 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9rgx2"
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.257874 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-g5flb"
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.258303 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.259473 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.270159 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9rgx2"]
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.380027 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7t4n\" (UniqueName: \"kubernetes.io/projected/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02-kube-api-access-q7t4n\") pod \"openstack-operator-index-9rgx2\" (UID: \"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02\") " pod="openstack-operators/openstack-operator-index-9rgx2"
Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.481985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7t4n\" (UniqueName: \"kubernetes.io/projected/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02-kube-api-access-q7t4n\") pod \"openstack-operator-index-9rgx2\" (UID: \"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02\") " pod="openstack-operators/openstack-operator-index-9rgx2"
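[editor's note] In the cert-manager-86cb77c54b-h88rg startup entry above, firstStartedPulling and lastFinishedPulling are both "0001-01-01 00:00:00 +0000 UTC", the zero value of Go's time.Time, which is consistent with no image pull having been needed; accordingly podStartSLOduration equals podStartE2EDuration (2.381689389s). A quick check of that reading:

package main

import (
	"fmt"
	"time"
)

// "0001-01-01 00:00:00 +0000 UTC" is the zero value of Go's time.Time;
// a zero pull window leaves the SLO duration equal to the E2E duration.
func main() {
	t, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "0001-01-01 00:00:00 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.IsZero()) // true: no image was pulled during this start
}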
\"kube-api-access-q7t4n\" (UniqueName: \"kubernetes.io/projected/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02-kube-api-access-q7t4n\") pod \"openstack-operator-index-9rgx2\" (UID: \"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02\") " pod="openstack-operators/openstack-operator-index-9rgx2" Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.584245 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9rgx2" Nov 28 15:36:59 crc kubenswrapper[4884]: I1128 15:36:59.867740 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9rgx2"] Nov 28 15:37:00 crc kubenswrapper[4884]: I1128 15:37:00.426434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9rgx2" event={"ID":"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02","Type":"ContainerStarted","Data":"c6cbb0054d0831fe117cf3157c9b1c7ba99fc05c232aec2218f2d598a625665a"} Nov 28 15:37:02 crc kubenswrapper[4884]: I1128 15:37:02.439320 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9rgx2" event={"ID":"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02","Type":"ContainerStarted","Data":"a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4"} Nov 28 15:37:02 crc kubenswrapper[4884]: I1128 15:37:02.463169 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-9rgx2" podStartSLOduration=1.074182315 podStartE2EDuration="3.463146476s" podCreationTimestamp="2025-11-28 15:36:59 +0000 UTC" firstStartedPulling="2025-11-28 15:36:59.875178601 +0000 UTC m=+1059.437962402" lastFinishedPulling="2025-11-28 15:37:02.264142762 +0000 UTC m=+1061.826926563" observedRunningTime="2025-11-28 15:37:02.453851791 +0000 UTC m=+1062.016635602" watchObservedRunningTime="2025-11-28 15:37:02.463146476 +0000 UTC m=+1062.025930287" Nov 28 15:37:02 crc kubenswrapper[4884]: I1128 15:37:02.619600 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-9rgx2"] Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.225785 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-d8z4x"] Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.227251 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.227251 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.248654 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjfk7\" (UniqueName: \"kubernetes.io/projected/b487198c-9fa4-49b0-9adc-147b3e38dd94-kube-api-access-rjfk7\") pod \"openstack-operator-index-d8z4x\" (UID: \"b487198c-9fa4-49b0-9adc-147b3e38dd94\") " pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.251339 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d8z4x"]
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.349730 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjfk7\" (UniqueName: \"kubernetes.io/projected/b487198c-9fa4-49b0-9adc-147b3e38dd94-kube-api-access-rjfk7\") pod \"openstack-operator-index-d8z4x\" (UID: \"b487198c-9fa4-49b0-9adc-147b3e38dd94\") " pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.385371 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjfk7\" (UniqueName: \"kubernetes.io/projected/b487198c-9fa4-49b0-9adc-147b3e38dd94-kube-api-access-rjfk7\") pod \"openstack-operator-index-d8z4x\" (UID: \"b487198c-9fa4-49b0-9adc-147b3e38dd94\") " pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.564035 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:03 crc kubenswrapper[4884]: I1128 15:37:03.974882 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d8z4x"]
Nov 28 15:37:03 crc kubenswrapper[4884]: W1128 15:37:03.981465 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb487198c_9fa4_49b0_9adc_147b3e38dd94.slice/crio-5954f012cdb3bbeac444945767f2ec85f285dcd74634ed997e1fc57c0b6d6ad7 WatchSource:0}: Error finding container 5954f012cdb3bbeac444945767f2ec85f285dcd74634ed997e1fc57c0b6d6ad7: Status 404 returned error can't find the container with id 5954f012cdb3bbeac444945767f2ec85f285dcd74634ed997e1fc57c0b6d6ad7
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.452746 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d8z4x" event={"ID":"b487198c-9fa4-49b0-9adc-147b3e38dd94","Type":"ContainerStarted","Data":"70a2f486f808590ea274af8e381e9b52d408fb060e4efdc628230dd46c0c9ba9"}
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.452792 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d8z4x" event={"ID":"b487198c-9fa4-49b0-9adc-147b3e38dd94","Type":"ContainerStarted","Data":"5954f012cdb3bbeac444945767f2ec85f285dcd74634ed997e1fc57c0b6d6ad7"}
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.452891 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-9rgx2" podUID="744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" containerName="registry-server" containerID="cri-o://a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4" gracePeriod=2
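[editor's note] The entry above kills the superseded registry-server with gracePeriod=2 (seconds). Outside the CRI, the same contract is roughly: signal the process, wait out the grace period, then force-kill. A generic sketch of that pattern for plain Unix processes, not the kubelet's actual CRI code path:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// Generic graceful-stop pattern behind "Killing container with a grace
// period": SIGTERM first, SIGKILL only if the grace period elapses.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace period exhausted, force-kill
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(stopWithGrace(cmd, 2*time.Second)) // mirrors gracePeriod=2
}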
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.809828 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9rgx2"
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.827680 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-d8z4x" podStartSLOduration=1.728764767 podStartE2EDuration="1.82766122s" podCreationTimestamp="2025-11-28 15:37:03 +0000 UTC" firstStartedPulling="2025-11-28 15:37:03.985597435 +0000 UTC m=+1063.548381256" lastFinishedPulling="2025-11-28 15:37:04.084493908 +0000 UTC m=+1063.647277709" observedRunningTime="2025-11-28 15:37:04.48020011 +0000 UTC m=+1064.042983921" watchObservedRunningTime="2025-11-28 15:37:04.82766122 +0000 UTC m=+1064.390445041"
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.869959 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7t4n\" (UniqueName: \"kubernetes.io/projected/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02-kube-api-access-q7t4n\") pod \"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02\" (UID: \"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02\") "
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.875607 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02-kube-api-access-q7t4n" (OuterVolumeSpecName: "kube-api-access-q7t4n") pod "744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" (UID: "744bd25e-57e1-4fe2-8c9c-3aac65b3ee02"). InnerVolumeSpecName "kube-api-access-q7t4n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:37:04 crc kubenswrapper[4884]: I1128 15:37:04.971583 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7t4n\" (UniqueName: \"kubernetes.io/projected/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02-kube-api-access-q7t4n\") on node \"crc\" DevicePath \"\""
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.459812 4884 generic.go:334] "Generic (PLEG): container finished" podID="744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" containerID="a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4" exitCode=0
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.459867 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9rgx2"
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.459885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9rgx2" event={"ID":"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02","Type":"ContainerDied","Data":"a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4"}
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.460211 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9rgx2" event={"ID":"744bd25e-57e1-4fe2-8c9c-3aac65b3ee02","Type":"ContainerDied","Data":"c6cbb0054d0831fe117cf3157c9b1c7ba99fc05c232aec2218f2d598a625665a"}
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.460239 4884 scope.go:117] "RemoveContainer" containerID="a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4"
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.474549 4884 scope.go:117] "RemoveContainer" containerID="a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4"
Nov 28 15:37:05 crc kubenswrapper[4884]: E1128 15:37:05.474999 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4\": container with ID starting with a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4 not found: ID does not exist" containerID="a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4"
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.475034 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4"} err="failed to get container status \"a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4\": rpc error: code = NotFound desc = could not find container \"a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4\": container with ID starting with a53592986a3ead91052e18c75c3fab388dd6d64b68eacf3255c3690b9ceeb3b4 not found: ID does not exist"
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.484550 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-9rgx2"]
Nov 28 15:37:05 crc kubenswrapper[4884]: I1128 15:37:05.488619 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-9rgx2"]
Nov 28 15:37:06 crc kubenswrapper[4884]: I1128 15:37:06.702980 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" path="/var/lib/kubelet/pods/744bd25e-57e1-4fe2-8c9c-3aac65b3ee02/volumes"
Nov 28 15:37:13 crc kubenswrapper[4884]: I1128 15:37:13.565003 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:13 crc kubenswrapper[4884]: I1128 15:37:13.565648 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:13 crc kubenswrapper[4884]: I1128 15:37:13.592213 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-d8z4x"
Nov 28 15:37:14 crc kubenswrapper[4884]: I1128 15:37:14.555493 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-d8z4x"
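[editor's note] The "ContainerStatus from runtime service failed" error above (rpc code = NotFound) is the benign race visible in context: RemoveContainer already succeeded, so the follow-up status query finds nothing and the deletor just logs it. With a gRPC-backed runtime client, distinguishing that case looks roughly like this (hypothetical caller; the grpc status/codes API is real):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// A NotFound from the runtime's ContainerStatus during cleanup means the
// container is already gone; callers can treat the removal as complete.
func alreadyRemoved(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	err := status.Error(codes.NotFound, "could not find container") // stand-in for the runtime's reply
	fmt.Println(alreadyRemoved(err))                                // true
}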
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.554058 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"]
Nov 28 15:37:19 crc kubenswrapper[4884]: E1128 15:37:19.554962 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" containerName="registry-server"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.554993 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" containerName="registry-server"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.555370 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="744bd25e-57e1-4fe2-8c9c-3aac65b3ee02" containerName="registry-server"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.556976 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.562405 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-wmtvd"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.570519 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"]
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.601939 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-util\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.602097 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-bundle\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.602260 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tl8x\" (UniqueName: \"kubernetes.io/projected/581c69a1-7109-4834-be13-a37c3343212b-kube-api-access-4tl8x\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.703939 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tl8x\" (UniqueName: \"kubernetes.io/projected/581c69a1-7109-4834-be13-a37c3343212b-kube-api-access-4tl8x\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"
\"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-util\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.704165 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-bundle\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.704924 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-bundle\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.705421 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-util\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.736871 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tl8x\" (UniqueName: \"kubernetes.io/projected/581c69a1-7109-4834-be13-a37c3343212b-kube-api-access-4tl8x\") pod \"291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.889572 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:37:19 crc kubenswrapper[4884]: I1128 15:37:19.889572 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"
Nov 28 15:37:20 crc kubenswrapper[4884]: I1128 15:37:20.539516 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2"]
Nov 28 15:37:20 crc kubenswrapper[4884]: I1128 15:37:20.569708 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" event={"ID":"581c69a1-7109-4834-be13-a37c3343212b","Type":"ContainerStarted","Data":"8736d0fc5a025a5c0a415c0dc19a9cffb921c5737e8481d9073782d59ce58b02"}
Nov 28 15:37:22 crc kubenswrapper[4884]: I1128 15:37:22.418557 4884 generic.go:334] "Generic (PLEG): container finished" podID="581c69a1-7109-4834-be13-a37c3343212b" containerID="b4a71bd7f34346b7cde4917b3ee3324b44ca72dfe44646243e855ba53d870f99" exitCode=0
Nov 28 15:37:22 crc kubenswrapper[4884]: I1128 15:37:22.418604 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" event={"ID":"581c69a1-7109-4834-be13-a37c3343212b","Type":"ContainerDied","Data":"b4a71bd7f34346b7cde4917b3ee3324b44ca72dfe44646243e855ba53d870f99"}
Nov 28 15:37:23 crc kubenswrapper[4884]: I1128 15:37:23.435918 4884 generic.go:334] "Generic (PLEG): container finished" podID="581c69a1-7109-4834-be13-a37c3343212b" containerID="807d9d6f05eef0ce9d54d1d3ae092987e874970659ab3f34870c0b76a50a6fff" exitCode=0
Nov 28 15:37:23 crc kubenswrapper[4884]: I1128 15:37:23.436210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" event={"ID":"581c69a1-7109-4834-be13-a37c3343212b","Type":"ContainerDied","Data":"807d9d6f05eef0ce9d54d1d3ae092987e874970659ab3f34870c0b76a50a6fff"}
Nov 28 15:37:24 crc kubenswrapper[4884]: I1128 15:37:24.449222 4884 generic.go:334] "Generic (PLEG): container finished" podID="581c69a1-7109-4834-be13-a37c3343212b" containerID="3a9d952d417fdfad10f5943d697303f45a85742c652f013a156fa89dc0475ade" exitCode=0
Nov 28 15:37:24 crc kubenswrapper[4884]: I1128 15:37:24.449389 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" event={"ID":"581c69a1-7109-4834-be13-a37c3343212b","Type":"ContainerDied","Data":"3a9d952d417fdfad10f5943d697303f45a85742c652f013a156fa89dc0475ade"}
Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.892803 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-util\") pod \"581c69a1-7109-4834-be13-a37c3343212b\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.892928 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-bundle\") pod \"581c69a1-7109-4834-be13-a37c3343212b\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.893002 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tl8x\" (UniqueName: \"kubernetes.io/projected/581c69a1-7109-4834-be13-a37c3343212b-kube-api-access-4tl8x\") pod \"581c69a1-7109-4834-be13-a37c3343212b\" (UID: \"581c69a1-7109-4834-be13-a37c3343212b\") " Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.894998 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-bundle" (OuterVolumeSpecName: "bundle") pod "581c69a1-7109-4834-be13-a37c3343212b" (UID: "581c69a1-7109-4834-be13-a37c3343212b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.902068 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/581c69a1-7109-4834-be13-a37c3343212b-kube-api-access-4tl8x" (OuterVolumeSpecName: "kube-api-access-4tl8x") pod "581c69a1-7109-4834-be13-a37c3343212b" (UID: "581c69a1-7109-4834-be13-a37c3343212b"). InnerVolumeSpecName "kube-api-access-4tl8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.924964 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-util" (OuterVolumeSpecName: "util") pod "581c69a1-7109-4834-be13-a37c3343212b" (UID: "581c69a1-7109-4834-be13-a37c3343212b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.994664 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.994710 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/581c69a1-7109-4834-be13-a37c3343212b-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:25 crc kubenswrapper[4884]: I1128 15:37:25.994728 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tl8x\" (UniqueName: \"kubernetes.io/projected/581c69a1-7109-4834-be13-a37c3343212b-kube-api-access-4tl8x\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:26 crc kubenswrapper[4884]: I1128 15:37:26.468159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" event={"ID":"581c69a1-7109-4834-be13-a37c3343212b","Type":"ContainerDied","Data":"8736d0fc5a025a5c0a415c0dc19a9cffb921c5737e8481d9073782d59ce58b02"} Nov 28 15:37:26 crc kubenswrapper[4884]: I1128 15:37:26.468750 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8736d0fc5a025a5c0a415c0dc19a9cffb921c5737e8481d9073782d59ce58b02" Nov 28 15:37:26 crc kubenswrapper[4884]: I1128 15:37:26.468605 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.316938 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq"] Nov 28 15:37:31 crc kubenswrapper[4884]: E1128 15:37:31.317723 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="pull" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.317736 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="pull" Nov 28 15:37:31 crc kubenswrapper[4884]: E1128 15:37:31.317747 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="util" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.317752 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="util" Nov 28 15:37:31 crc kubenswrapper[4884]: E1128 15:37:31.317774 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="extract" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.317780 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="extract" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.317891 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="581c69a1-7109-4834-be13-a37c3343212b" containerName="extract" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.318567 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.322197 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-f66f8" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.341774 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq"] Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.366952 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5g2z\" (UniqueName: \"kubernetes.io/projected/62a970c8-14b9-462a-8e0f-773b9b2847c5-kube-api-access-k5g2z\") pod \"openstack-operator-controller-operator-857c5c6d5d-2dlgq\" (UID: \"62a970c8-14b9-462a-8e0f-773b9b2847c5\") " pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.468666 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5g2z\" (UniqueName: \"kubernetes.io/projected/62a970c8-14b9-462a-8e0f-773b9b2847c5-kube-api-access-k5g2z\") pod \"openstack-operator-controller-operator-857c5c6d5d-2dlgq\" (UID: \"62a970c8-14b9-462a-8e0f-773b9b2847c5\") " pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.487720 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5g2z\" (UniqueName: \"kubernetes.io/projected/62a970c8-14b9-462a-8e0f-773b9b2847c5-kube-api-access-k5g2z\") pod \"openstack-operator-controller-operator-857c5c6d5d-2dlgq\" (UID: \"62a970c8-14b9-462a-8e0f-773b9b2847c5\") " pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:37:31 crc kubenswrapper[4884]: I1128 15:37:31.636455 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:37:32 crc kubenswrapper[4884]: I1128 15:37:32.073581 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq"] Nov 28 15:37:32 crc kubenswrapper[4884]: W1128 15:37:32.085783 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod62a970c8_14b9_462a_8e0f_773b9b2847c5.slice/crio-02a6eab4fe9300828ebbfe07b2c6a3d6d6206486460557273c53cedf649e726d WatchSource:0}: Error finding container 02a6eab4fe9300828ebbfe07b2c6a3d6d6206486460557273c53cedf649e726d: Status 404 returned error can't find the container with id 02a6eab4fe9300828ebbfe07b2c6a3d6d6206486460557273c53cedf649e726d Nov 28 15:37:32 crc kubenswrapper[4884]: I1128 15:37:32.507583 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" event={"ID":"62a970c8-14b9-462a-8e0f-773b9b2847c5","Type":"ContainerStarted","Data":"02a6eab4fe9300828ebbfe07b2c6a3d6d6206486460557273c53cedf649e726d"} Nov 28 15:37:38 crc kubenswrapper[4884]: I1128 15:37:38.550167 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" event={"ID":"62a970c8-14b9-462a-8e0f-773b9b2847c5","Type":"ContainerStarted","Data":"b34d2a0dd82f90eaa6dba51c4ff634daa0eccab9a8a257db14f9c828f3d59092"} Nov 28 15:37:40 crc kubenswrapper[4884]: I1128 15:37:40.564529 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" event={"ID":"62a970c8-14b9-462a-8e0f-773b9b2847c5","Type":"ContainerStarted","Data":"f8bd78332d6177f0443789c8b708bede3042d37406db2f294cc8369b8abb682b"} Nov 28 15:37:40 crc kubenswrapper[4884]: I1128 15:37:40.564889 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:37:40 crc kubenswrapper[4884]: I1128 15:37:40.593652 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" podStartSLOduration=1.490385464 podStartE2EDuration="9.593631898s" podCreationTimestamp="2025-11-28 15:37:31 +0000 UTC" firstStartedPulling="2025-11-28 15:37:32.093266817 +0000 UTC m=+1091.656050618" lastFinishedPulling="2025-11-28 15:37:40.196513241 +0000 UTC m=+1099.759297052" observedRunningTime="2025-11-28 15:37:40.589715208 +0000 UTC m=+1100.152499049" watchObservedRunningTime="2025-11-28 15:37:40.593631898 +0000 UTC m=+1100.156415699" Nov 28 15:37:51 crc kubenswrapper[4884]: I1128 15:37:51.639292 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-857c5c6d5d-2dlgq" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.202066 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.203795 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.208564 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-j56b4"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.209822 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-znw44" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.210261 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.212050 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-k2rbr" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.228725 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.239638 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzj58\" (UniqueName: \"kubernetes.io/projected/c54a7e19-62d7-474b-90f2-0411cc2a1942-kube-api-access-jzj58\") pod \"barbican-operator-controller-manager-5bfbbb859d-xplsn\" (UID: \"c54a7e19-62d7-474b-90f2-0411cc2a1942\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.239704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnth7\" (UniqueName: \"kubernetes.io/projected/ef79df92-f12e-4606-8b3d-27b23a1bc3c7-kube-api-access-lnth7\") pod \"cinder-operator-controller-manager-748967c98-j56b4\" (UID: \"ef79df92-f12e-4606-8b3d-27b23a1bc3c7\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.240131 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.241763 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.244307 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-9qllv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.244785 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-j56b4"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.255193 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.262601 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.263527 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.267618 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-7rdq9" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.276046 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.297170 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.298464 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.301547 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.303863 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-c77c5" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.326282 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.327177 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.330588 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-rct7s" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.339484 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.342910 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d87cd\" (UniqueName: \"kubernetes.io/projected/fca659fc-dce8-4284-a2a4-bbeb59993bcf-kube-api-access-d87cd\") pod \"heat-operator-controller-manager-698d6fd7d6-4k5mj\" (UID: \"fca659fc-dce8-4284-a2a4-bbeb59993bcf\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.342961 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkvdh\" (UniqueName: \"kubernetes.io/projected/262d6bdb-061f-406a-bd20-c0e17112188a-kube-api-access-gkvdh\") pod \"designate-operator-controller-manager-6788cc6d75-5z54f\" (UID: \"262d6bdb-061f-406a-bd20-c0e17112188a\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.342987 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhlmx\" (UniqueName: \"kubernetes.io/projected/9831a921-a48d-4446-a5da-648cebb21936-kube-api-access-bhlmx\") pod \"glance-operator-controller-manager-85fbd69fcd-ddjbx\" (UID: \"9831a921-a48d-4446-a5da-648cebb21936\") " 
pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.343022 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzj58\" (UniqueName: \"kubernetes.io/projected/c54a7e19-62d7-474b-90f2-0411cc2a1942-kube-api-access-jzj58\") pod \"barbican-operator-controller-manager-5bfbbb859d-xplsn\" (UID: \"c54a7e19-62d7-474b-90f2-0411cc2a1942\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.343041 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnth7\" (UniqueName: \"kubernetes.io/projected/ef79df92-f12e-4606-8b3d-27b23a1bc3c7-kube-api-access-lnth7\") pod \"cinder-operator-controller-manager-748967c98-j56b4\" (UID: \"ef79df92-f12e-4606-8b3d-27b23a1bc3c7\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.353202 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.354173 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.358159 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.361812 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.361991 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-mgdsm" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.362080 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.362964 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.372462 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-hzbzc" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.372495 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.373422 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.376134 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-cnhnw" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.377152 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.384664 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnth7\" (UniqueName: \"kubernetes.io/projected/ef79df92-f12e-4606-8b3d-27b23a1bc3c7-kube-api-access-lnth7\") pod \"cinder-operator-controller-manager-748967c98-j56b4\" (UID: \"ef79df92-f12e-4606-8b3d-27b23a1bc3c7\") " pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.388879 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.389880 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.391333 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzj58\" (UniqueName: \"kubernetes.io/projected/c54a7e19-62d7-474b-90f2-0411cc2a1942-kube-api-access-jzj58\") pod \"barbican-operator-controller-manager-5bfbbb859d-xplsn\" (UID: \"c54a7e19-62d7-474b-90f2-0411cc2a1942\") " pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.395278 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.399986 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-rqc6f" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.404319 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.405280 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.407999 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-r77t7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.408205 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.429136 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.443955 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sdwv\" (UniqueName: \"kubernetes.io/projected/4e61efa8-cdc4-4974-ab4b-5f81e26cd439-kube-api-access-6sdwv\") pod \"mariadb-operator-controller-manager-64d7c556cd-hrrh7\" (UID: \"4e61efa8-cdc4-4974-ab4b-5f81e26cd439\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444001 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksbpr\" (UniqueName: \"kubernetes.io/projected/36d673e0-6b01-4296-829d-ce3c935876ad-kube-api-access-ksbpr\") pod \"keystone-operator-controller-manager-79cc9d59f5-zd6gg\" (UID: \"36d673e0-6b01-4296-829d-ce3c935876ad\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444042 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444105 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d87cd\" (UniqueName: \"kubernetes.io/projected/fca659fc-dce8-4284-a2a4-bbeb59993bcf-kube-api-access-d87cd\") pod \"heat-operator-controller-manager-698d6fd7d6-4k5mj\" (UID: \"fca659fc-dce8-4284-a2a4-bbeb59993bcf\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444132 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llsmm\" (UniqueName: \"kubernetes.io/projected/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-kube-api-access-llsmm\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkvdh\" (UniqueName: \"kubernetes.io/projected/262d6bdb-061f-406a-bd20-c0e17112188a-kube-api-access-gkvdh\") pod \"designate-operator-controller-manager-6788cc6d75-5z54f\" (UID: \"262d6bdb-061f-406a-bd20-c0e17112188a\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 
15:38:07.444200 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl82c\" (UniqueName: \"kubernetes.io/projected/6c2798c4-5cb6-4d34-9178-1d422470bbb1-kube-api-access-sl82c\") pod \"horizon-operator-controller-manager-7d5d9fd47f-67f2r\" (UID: \"6c2798c4-5cb6-4d34-9178-1d422470bbb1\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444229 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhlmx\" (UniqueName: \"kubernetes.io/projected/9831a921-a48d-4446-a5da-648cebb21936-kube-api-access-bhlmx\") pod \"glance-operator-controller-manager-85fbd69fcd-ddjbx\" (UID: \"9831a921-a48d-4446-a5da-648cebb21936\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444256 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv99l\" (UniqueName: \"kubernetes.io/projected/c4d80b0a-9e14-4a7a-8461-129c8cb07e9d-kube-api-access-pv99l\") pod \"ironic-operator-controller-manager-54485f899-5xhvn\" (UID: \"c4d80b0a-9e14-4a7a-8461-129c8cb07e9d\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.444287 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8ckm\" (UniqueName: \"kubernetes.io/projected/8bcfffae-7352-42ae-9fe3-c0a6e85a9301-kube-api-access-v8ckm\") pod \"manila-operator-controller-manager-5cbc8c7f96-qbh5l\" (UID: \"8bcfffae-7352-42ae-9fe3-c0a6e85a9301\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.497154 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhlmx\" (UniqueName: \"kubernetes.io/projected/9831a921-a48d-4446-a5da-648cebb21936-kube-api-access-bhlmx\") pod \"glance-operator-controller-manager-85fbd69fcd-ddjbx\" (UID: \"9831a921-a48d-4446-a5da-648cebb21936\") " pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.499710 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-422zp"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.503858 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkvdh\" (UniqueName: \"kubernetes.io/projected/262d6bdb-061f-406a-bd20-c0e17112188a-kube-api-access-gkvdh\") pod \"designate-operator-controller-manager-6788cc6d75-5z54f\" (UID: \"262d6bdb-061f-406a-bd20-c0e17112188a\") " pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.505184 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d87cd\" (UniqueName: \"kubernetes.io/projected/fca659fc-dce8-4284-a2a4-bbeb59993bcf-kube-api-access-d87cd\") pod \"heat-operator-controller-manager-698d6fd7d6-4k5mj\" (UID: \"fca659fc-dce8-4284-a2a4-bbeb59993bcf\") " pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.508668 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/neutron-operator-controller-manager-58879495c-lkq65"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.509920 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.512214 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-nqg2q" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.557051 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.558251 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.560861 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-q6gr7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.561344 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.561545 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:07 crc kubenswrapper[4884]: E1128 15:38:07.561802 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:07 crc kubenswrapper[4884]: E1128 15:38:07.561862 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert podName:19969fcc-a2cf-4ed4-afd5-4d585f5dcb70 nodeName:}" failed. No retries permitted until 2025-11-28 15:38:08.061845216 +0000 UTC m=+1127.624629017 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert") pod "infra-operator-controller-manager-6c55d8d69b-5qkwv" (UID: "19969fcc-a2cf-4ed4-afd5-4d585f5dcb70") : secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562452 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llsmm\" (UniqueName: \"kubernetes.io/projected/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-kube-api-access-llsmm\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562512 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl82c\" (UniqueName: \"kubernetes.io/projected/6c2798c4-5cb6-4d34-9178-1d422470bbb1-kube-api-access-sl82c\") pod \"horizon-operator-controller-manager-7d5d9fd47f-67f2r\" (UID: \"6c2798c4-5cb6-4d34-9178-1d422470bbb1\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562542 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-526xs\" (UniqueName: \"kubernetes.io/projected/26e94d5b-3a3c-49d9-910f-4f339780ec2f-kube-api-access-526xs\") pod \"nova-operator-controller-manager-79d658b66d-422zp\" (UID: \"26e94d5b-3a3c-49d9-910f-4f339780ec2f\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv99l\" (UniqueName: \"kubernetes.io/projected/c4d80b0a-9e14-4a7a-8461-129c8cb07e9d-kube-api-access-pv99l\") pod \"ironic-operator-controller-manager-54485f899-5xhvn\" (UID: \"c4d80b0a-9e14-4a7a-8461-129c8cb07e9d\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562602 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8ckm\" (UniqueName: \"kubernetes.io/projected/8bcfffae-7352-42ae-9fe3-c0a6e85a9301-kube-api-access-v8ckm\") pod \"manila-operator-controller-manager-5cbc8c7f96-qbh5l\" (UID: \"8bcfffae-7352-42ae-9fe3-c0a6e85a9301\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562663 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sdwv\" (UniqueName: \"kubernetes.io/projected/4e61efa8-cdc4-4974-ab4b-5f81e26cd439-kube-api-access-6sdwv\") pod \"mariadb-operator-controller-manager-64d7c556cd-hrrh7\" (UID: \"4e61efa8-cdc4-4974-ab4b-5f81e26cd439\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.562680 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksbpr\" (UniqueName: \"kubernetes.io/projected/36d673e0-6b01-4296-829d-ce3c935876ad-kube-api-access-ksbpr\") pod \"keystone-operator-controller-manager-79cc9d59f5-zd6gg\" (UID: \"36d673e0-6b01-4296-829d-ce3c935876ad\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:07 crc 
kubenswrapper[4884]: I1128 15:38:07.564156 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.588215 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-422zp"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.588332 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.604720 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksbpr\" (UniqueName: \"kubernetes.io/projected/36d673e0-6b01-4296-829d-ce3c935876ad-kube-api-access-ksbpr\") pod \"keystone-operator-controller-manager-79cc9d59f5-zd6gg\" (UID: \"36d673e0-6b01-4296-829d-ce3c935876ad\") " pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.606783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8ckm\" (UniqueName: \"kubernetes.io/projected/8bcfffae-7352-42ae-9fe3-c0a6e85a9301-kube-api-access-v8ckm\") pod \"manila-operator-controller-manager-5cbc8c7f96-qbh5l\" (UID: \"8bcfffae-7352-42ae-9fe3-c0a6e85a9301\") " pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.621378 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.630125 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llsmm\" (UniqueName: \"kubernetes.io/projected/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-kube-api-access-llsmm\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.632836 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-lkq65"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.647509 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sdwv\" (UniqueName: \"kubernetes.io/projected/4e61efa8-cdc4-4974-ab4b-5f81e26cd439-kube-api-access-6sdwv\") pod \"mariadb-operator-controller-manager-64d7c556cd-hrrh7\" (UID: \"4e61efa8-cdc4-4974-ab4b-5f81e26cd439\") " pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.647590 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.655023 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl82c\" (UniqueName: \"kubernetes.io/projected/6c2798c4-5cb6-4d34-9178-1d422470bbb1-kube-api-access-sl82c\") pod \"horizon-operator-controller-manager-7d5d9fd47f-67f2r\" (UID: \"6c2798c4-5cb6-4d34-9178-1d422470bbb1\") " pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.659752 4884 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.663679 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c8lv\" (UniqueName: \"kubernetes.io/projected/20081e0d-0be1-46c4-a60a-49034215b26d-kube-api-access-5c8lv\") pod \"neutron-operator-controller-manager-58879495c-lkq65\" (UID: \"20081e0d-0be1-46c4-a60a-49034215b26d\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.663742 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-526xs\" (UniqueName: \"kubernetes.io/projected/26e94d5b-3a3c-49d9-910f-4f339780ec2f-kube-api-access-526xs\") pod \"nova-operator-controller-manager-79d658b66d-422zp\" (UID: \"26e94d5b-3a3c-49d9-910f-4f339780ec2f\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.664941 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.667236 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-x86bp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.685572 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-526xs\" (UniqueName: \"kubernetes.io/projected/26e94d5b-3a3c-49d9-910f-4f339780ec2f-kube-api-access-526xs\") pod \"nova-operator-controller-manager-79d658b66d-422zp\" (UID: \"26e94d5b-3a3c-49d9-910f-4f339780ec2f\") " pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.685832 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv99l\" (UniqueName: \"kubernetes.io/projected/c4d80b0a-9e14-4a7a-8461-129c8cb07e9d-kube-api-access-pv99l\") pod \"ironic-operator-controller-manager-54485f899-5xhvn\" (UID: \"c4d80b0a-9e14-4a7a-8461-129c8cb07e9d\") " pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.717210 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.718298 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.736697 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-g82xf" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.737217 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.750730 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.761339 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.762421 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.763339 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.764911 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c8lv\" (UniqueName: \"kubernetes.io/projected/20081e0d-0be1-46c4-a60a-49034215b26d-kube-api-access-5c8lv\") pod \"neutron-operator-controller-manager-58879495c-lkq65\" (UID: \"20081e0d-0be1-46c4-a60a-49034215b26d\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.768473 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-p5mcs" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.772877 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.793072 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.799005 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c8lv\" (UniqueName: \"kubernetes.io/projected/20081e0d-0be1-46c4-a60a-49034215b26d-kube-api-access-5c8lv\") pod \"neutron-operator-controller-manager-58879495c-lkq65\" (UID: \"20081e0d-0be1-46c4-a60a-49034215b26d\") " pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.813744 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.815360 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.825999 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-wkvwz" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.834786 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.837893 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.853216 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.868616 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.870742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn8wb\" (UniqueName: \"kubernetes.io/projected/a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9-kube-api-access-gn8wb\") pod \"ovn-operator-controller-manager-5b67cfc8fb-89jvx\" (UID: \"a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.870833 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a96a807f-9e3f-426a-bd7c-b1c14db24baa-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.870864 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz2pz\" (UniqueName: \"kubernetes.io/projected/a96a807f-9e3f-426a-bd7c-b1c14db24baa-kube-api-access-wz2pz\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.870928 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mxvf\" (UniqueName: \"kubernetes.io/projected/eb06d6ab-34b7-4511-9680-6903fe6d50b7-kube-api-access-2mxvf\") pod \"octavia-operator-controller-manager-d5fb87cb8-srsqz\" (UID: \"eb06d6ab-34b7-4511-9680-6903fe6d50b7\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.872373 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.873600 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.877867 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-z2phd" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.889037 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.894102 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.895242 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.899097 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-pwx55" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.912891 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.928776 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.944508 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.963150 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9"] Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.969650 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.973280 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-9bmvp" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979172 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz2pz\" (UniqueName: \"kubernetes.io/projected/a96a807f-9e3f-426a-bd7c-b1c14db24baa-kube-api-access-wz2pz\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckgq7\" (UniqueName: \"kubernetes.io/projected/23a976c8-631a-4d07-a1a1-d44fdbac2e06-kube-api-access-ckgq7\") pod \"placement-operator-controller-manager-867d87977b-zcwcr\" (UID: \"23a976c8-631a-4d07-a1a1-d44fdbac2e06\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979298 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mxvf\" (UniqueName: \"kubernetes.io/projected/eb06d6ab-34b7-4511-9680-6903fe6d50b7-kube-api-access-2mxvf\") pod \"octavia-operator-controller-manager-d5fb87cb8-srsqz\" (UID: \"eb06d6ab-34b7-4511-9680-6903fe6d50b7\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979327 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-z7qsz\" (UniqueName: \"kubernetes.io/projected/53444326-ebe6-45f5-a086-63ef03d1533a-kube-api-access-z7qsz\") pod \"telemetry-operator-controller-manager-695797c565-zfmh2\" (UID: \"53444326-ebe6-45f5-a086-63ef03d1533a\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979414 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlfsl\" (UniqueName: \"kubernetes.io/projected/7a96a060-30e3-4405-83cb-7a9b85d24c84-kube-api-access-tlfsl\") pod \"test-operator-controller-manager-bb86466d8-5g9m9\" (UID: \"7a96a060-30e3-4405-83cb-7a9b85d24c84\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979439 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn8wb\" (UniqueName: \"kubernetes.io/projected/a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9-kube-api-access-gn8wb\") pod \"ovn-operator-controller-manager-5b67cfc8fb-89jvx\" (UID: \"a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979485 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a96a807f-9e3f-426a-bd7c-b1c14db24baa-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:07 crc kubenswrapper[4884]: I1128 15:38:07.979509 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd49m\" (UniqueName: \"kubernetes.io/projected/6fb9fd98-178d-4461-a090-ae0a6b35e258-kube-api-access-bd49m\") pod \"swift-operator-controller-manager-8f6687c44-5wqmv\" (UID: \"6fb9fd98-178d-4461-a090-ae0a6b35e258\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" Nov 28 15:38:07 crc kubenswrapper[4884]: E1128 15:38:07.981674 4884 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 15:38:07 crc kubenswrapper[4884]: E1128 15:38:07.981741 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a96a807f-9e3f-426a-bd7c-b1c14db24baa-cert podName:a96a807f-9e3f-426a-bd7c-b1c14db24baa nodeName:}" failed. No retries permitted until 2025-11-28 15:38:08.48172152 +0000 UTC m=+1128.044505401 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a96a807f-9e3f-426a-bd7c-b1c14db24baa-cert") pod "openstack-baremetal-operator-controller-manager-77868f484-gt8db" (UID: "a96a807f-9e3f-426a-bd7c-b1c14db24baa") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.009145 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.017453 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn8wb\" (UniqueName: \"kubernetes.io/projected/a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9-kube-api-access-gn8wb\") pod \"ovn-operator-controller-manager-5b67cfc8fb-89jvx\" (UID: \"a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9\") " pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.018083 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz2pz\" (UniqueName: \"kubernetes.io/projected/a96a807f-9e3f-426a-bd7c-b1c14db24baa-kube-api-access-wz2pz\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.024830 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mxvf\" (UniqueName: \"kubernetes.io/projected/eb06d6ab-34b7-4511-9680-6903fe6d50b7-kube-api-access-2mxvf\") pod \"octavia-operator-controller-manager-d5fb87cb8-srsqz\" (UID: \"eb06d6ab-34b7-4511-9680-6903fe6d50b7\") " pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.055330 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.057469 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.067910 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-jqvll" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.082254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlfsl\" (UniqueName: \"kubernetes.io/projected/7a96a060-30e3-4405-83cb-7a9b85d24c84-kube-api-access-tlfsl\") pod \"test-operator-controller-manager-bb86466d8-5g9m9\" (UID: \"7a96a060-30e3-4405-83cb-7a9b85d24c84\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.082317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8hm7\" (UniqueName: \"kubernetes.io/projected/ac6ec22d-b6e9-49c7-9c84-222e82ba75d2-kube-api-access-n8hm7\") pod \"watcher-operator-controller-manager-6b56b8849f-lsxjw\" (UID: \"ac6ec22d-b6e9-49c7-9c84-222e82ba75d2\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.082359 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.082399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd49m\" (UniqueName: \"kubernetes.io/projected/6fb9fd98-178d-4461-a090-ae0a6b35e258-kube-api-access-bd49m\") pod \"swift-operator-controller-manager-8f6687c44-5wqmv\" (UID: \"6fb9fd98-178d-4461-a090-ae0a6b35e258\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.082449 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckgq7\" (UniqueName: \"kubernetes.io/projected/23a976c8-631a-4d07-a1a1-d44fdbac2e06-kube-api-access-ckgq7\") pod \"placement-operator-controller-manager-867d87977b-zcwcr\" (UID: \"23a976c8-631a-4d07-a1a1-d44fdbac2e06\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.082516 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7qsz\" (UniqueName: \"kubernetes.io/projected/53444326-ebe6-45f5-a086-63ef03d1533a-kube-api-access-z7qsz\") pod \"telemetry-operator-controller-manager-695797c565-zfmh2\" (UID: \"53444326-ebe6-45f5-a086-63ef03d1533a\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" Nov 28 15:38:08 crc kubenswrapper[4884]: E1128 15:38:08.083169 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.085985 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw"] Nov 28 15:38:08 crc kubenswrapper[4884]: E1128 15:38:08.086069 4884 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert podName:19969fcc-a2cf-4ed4-afd5-4d585f5dcb70 nodeName:}" failed. No retries permitted until 2025-11-28 15:38:09.083210175 +0000 UTC m=+1128.645993976 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert") pod "infra-operator-controller-manager-6c55d8d69b-5qkwv" (UID: "19969fcc-a2cf-4ed4-afd5-4d585f5dcb70") : secret "infra-operator-webhook-server-cert" not found Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.105265 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.113633 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlfsl\" (UniqueName: \"kubernetes.io/projected/7a96a060-30e3-4405-83cb-7a9b85d24c84-kube-api-access-tlfsl\") pod \"test-operator-controller-manager-bb86466d8-5g9m9\" (UID: \"7a96a060-30e3-4405-83cb-7a9b85d24c84\") " pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.115162 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7qsz\" (UniqueName: \"kubernetes.io/projected/53444326-ebe6-45f5-a086-63ef03d1533a-kube-api-access-z7qsz\") pod \"telemetry-operator-controller-manager-695797c565-zfmh2\" (UID: \"53444326-ebe6-45f5-a086-63ef03d1533a\") " pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.115721 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd49m\" (UniqueName: \"kubernetes.io/projected/6fb9fd98-178d-4461-a090-ae0a6b35e258-kube-api-access-bd49m\") pod \"swift-operator-controller-manager-8f6687c44-5wqmv\" (UID: \"6fb9fd98-178d-4461-a090-ae0a6b35e258\") " pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.153844 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckgq7\" (UniqueName: \"kubernetes.io/projected/23a976c8-631a-4d07-a1a1-d44fdbac2e06-kube-api-access-ckgq7\") pod \"placement-operator-controller-manager-867d87977b-zcwcr\" (UID: \"23a976c8-631a-4d07-a1a1-d44fdbac2e06\") " pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.154433 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.196812 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8hm7\" (UniqueName: \"kubernetes.io/projected/ac6ec22d-b6e9-49c7-9c84-222e82ba75d2-kube-api-access-n8hm7\") pod \"watcher-operator-controller-manager-6b56b8849f-lsxjw\" (UID: \"ac6ec22d-b6e9-49c7-9c84-222e82ba75d2\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.224278 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.230770 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8hm7\" (UniqueName: \"kubernetes.io/projected/ac6ec22d-b6e9-49c7-9c84-222e82ba75d2-kube-api-access-n8hm7\") pod \"watcher-operator-controller-manager-6b56b8849f-lsxjw\" (UID: \"ac6ec22d-b6e9-49c7-9c84-222e82ba75d2\") " pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.231072 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.232726 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-6b948" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.233008 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.249419 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.264985 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.276889 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.286494 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.296867 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.298173 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.299415 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.309801 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.310595 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-448dc" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.337174 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-748967c98-j56b4"] Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.401106 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.402122 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wz488\" (UniqueName: \"kubernetes.io/projected/fe380e38-07d1-423d-8a43-51f2a0dc154f-kube-api-access-wz488\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.402294 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.402937 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn7qc\" (UniqueName: \"kubernetes.io/projected/ef2bba04-9b56-460b-8ad2-c5be6d08f79d-kube-api-access-wn7qc\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj\" (UID: \"ef2bba04-9b56-460b-8ad2-c5be6d08f79d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj" Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.504037 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" Nov 28 15:38:08 crc kubenswrapper[4884]: E1128 15:38:08.504673 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 15:38:08 crc kubenswrapper[4884]: E1128 15:38:08.504742 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert podName:fe380e38-07d1-423d-8a43-51f2a0dc154f nodeName:}" failed. No retries permitted until 2025-11-28 15:38:09.0047235 +0000 UTC m=+1128.567507301 (durationBeforeRetry 500ms). 
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.504806 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn7qc\" (UniqueName: \"kubernetes.io/projected/ef2bba04-9b56-460b-8ad2-c5be6d08f79d-kube-api-access-wn7qc\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj\" (UID: \"ef2bba04-9b56-460b-8ad2-c5be6d08f79d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.504860 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a96a807f-9e3f-426a-bd7c-b1c14db24baa-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.505043 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wz488\" (UniqueName: \"kubernetes.io/projected/fe380e38-07d1-423d-8a43-51f2a0dc154f-kube-api-access-wz488\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.513376 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a96a807f-9e3f-426a-bd7c-b1c14db24baa-cert\") pod \"openstack-baremetal-operator-controller-manager-77868f484-gt8db\" (UID: \"a96a807f-9e3f-426a-bd7c-b1c14db24baa\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.530895 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wz488\" (UniqueName: \"kubernetes.io/projected/fe380e38-07d1-423d-8a43-51f2a0dc154f-kube-api-access-wz488\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.546830 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn7qc\" (UniqueName: \"kubernetes.io/projected/ef2bba04-9b56-460b-8ad2-c5be6d08f79d-kube-api-access-wn7qc\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj\" (UID: \"ef2bba04-9b56-460b-8ad2-c5be6d08f79d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.640494 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.691052 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.783156 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" event={"ID":"ef79df92-f12e-4606-8b3d-27b23a1bc3c7","Type":"ContainerStarted","Data":"3dcaaf28e9c11f85a710285f2254e49c6afe81611dea02ddc300b8c55a9c21f9"}
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.811878 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r"]
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.835038 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f"]
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.839115 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx"]
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.848480 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn"]
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.854269 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn"]
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.879930 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj"]
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.885446 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l"]
Nov 28 15:38:08 crc kubenswrapper[4884]: W1128 15:38:08.892257 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfca659fc_dce8_4284_a2a4_bbeb59993bcf.slice/crio-757a8810a3ae036c5161bbded05756c50e32822e74d8b4f0b7f660af6a3d4ffc WatchSource:0}: Error finding container 757a8810a3ae036c5161bbded05756c50e32822e74d8b4f0b7f660af6a3d4ffc: Status 404 returned error can't find the container with id 757a8810a3ae036c5161bbded05756c50e32822e74d8b4f0b7f660af6a3d4ffc
Nov 28 15:38:08 crc kubenswrapper[4884]: W1128 15:38:08.896196 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bcfffae_7352_42ae_9fe3_c0a6e85a9301.slice/crio-6d91fd73844b765d1349ee9e08e6a383eef58da534b8818854fc0bfcc639064b WatchSource:0}: Error finding container 6d91fd73844b765d1349ee9e08e6a383eef58da534b8818854fc0bfcc639064b: Status 404 returned error can't find the container with id 6d91fd73844b765d1349ee9e08e6a383eef58da534b8818854fc0bfcc639064b
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.896198 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-58879495c-lkq65"]
Nov 28 15:38:08 crc kubenswrapper[4884]: W1128 15:38:08.908248 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e61efa8_cdc4_4974_ab4b_5f81e26cd439.slice/crio-77ef7eb4b523fdc2d928cd199a9576a47d1c8a28110d9fe3b02cfdc6dcd37075 WatchSource:0}: Error finding container 77ef7eb4b523fdc2d928cd199a9576a47d1c8a28110d9fe3b02cfdc6dcd37075: Status 404 returned error can't find the container with id 77ef7eb4b523fdc2d928cd199a9576a47d1c8a28110d9fe3b02cfdc6dcd37075
Nov 28 15:38:08 crc kubenswrapper[4884]: I1128 15:38:08.910587 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7"]
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.012811 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr"]
Nov 28 15:38:09 crc kubenswrapper[4884]: W1128 15:38:09.014349 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23a976c8_631a_4d07_a1a1_d44fdbac2e06.slice/crio-ba375344ee39d1339eda6a85ec767833b1d4a1abe84bb0bdf23a376b8fcf9ade WatchSource:0}: Error finding container ba375344ee39d1339eda6a85ec767833b1d4a1abe84bb0bdf23a376b8fcf9ade: Status 404 returned error can't find the container with id ba375344ee39d1339eda6a85ec767833b1d4a1abe84bb0bdf23a376b8fcf9ade
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.014727 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.014904 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.014962 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert podName:fe380e38-07d1-423d-8a43-51f2a0dc154f nodeName:}" failed. No retries permitted until 2025-11-28 15:38:10.014947973 +0000 UTC m=+1129.577731774 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert") pod "openstack-operator-controller-manager-69699fdd55-xr4cn" (UID: "fe380e38-07d1-423d-8a43-51f2a0dc154f") : secret "webhook-server-cert" not found
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.019947 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz"]
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.023263 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ksbpr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-79cc9d59f5-zd6gg_openstack-operators(36d673e0-6b01-4296-829d-ce3c935876ad): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.037794 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg"]
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.049959 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79d658b66d-422zp"]
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.057162 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv"]
Nov 28 15:38:09 crc kubenswrapper[4884]: W1128 15:38:09.060149 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fb9fd98_178d_4461_a090_ae0a6b35e258.slice/crio-6bea0425ae149beada7863440bdcbf029dfa6790a593f1b6a09cddf4eae5ddf0 WatchSource:0}: Error finding container 6bea0425ae149beada7863440bdcbf029dfa6790a593f1b6a09cddf4eae5ddf0: Status 404 returned error can't find the container with id 6bea0425ae149beada7863440bdcbf029dfa6790a593f1b6a09cddf4eae5ddf0
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.061065 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tlfsl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-bb86466d8-5g9m9_openstack-operators(7a96a060-30e3-4405-83cb-7a9b85d24c84): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.062311 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gn8wb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-5b67cfc8fb-89jvx_openstack-operators(a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.063596 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9"]
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.063790 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bd49m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-8f6687c44-5wqmv_openstack-operators(6fb9fd98-178d-4461-a090-ae0a6b35e258): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.068459 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx"]
Nov 28 15:38:09 crc kubenswrapper[4884]: W1128 15:38:09.071988 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26e94d5b_3a3c_49d9_910f_4f339780ec2f.slice/crio-ad735b77a3d6849498df69592361f0cb5e919bfb152ada63f1c4de919513dba4 WatchSource:0}: Error finding container ad735b77a3d6849498df69592361f0cb5e919bfb152ada63f1c4de919513dba4: Status 404 returned error can't find the container with id ad735b77a3d6849498df69592361f0cb5e919bfb152ada63f1c4de919513dba4
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.079916 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:debe5d6d29a007374b270b0e114e69b2136eee61dabab8576baf4010c951edb9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-526xs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79d658b66d-422zp_openstack-operators(26e94d5b-3a3c-49d9-910f-4f339780ec2f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.116434 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.127887 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/19969fcc-a2cf-4ed4-afd5-4d585f5dcb70-cert\") pod \"infra-operator-controller-manager-6c55d8d69b-5qkwv\" (UID: \"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70\") " pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.173485 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2"]
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.177387 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.177966 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw"]
Nov 28 15:38:09 crc kubenswrapper[4884]: W1128 15:38:09.180516 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac6ec22d_b6e9_49c7_9c84_222e82ba75d2.slice/crio-6b622310af407b8e8e18176df9c2bd73700a5e3b26ef641eaf5ba47046a0a7d7 WatchSource:0}: Error finding container 6b622310af407b8e8e18176df9c2bd73700a5e3b26ef641eaf5ba47046a0a7d7: Status 404 returned error can't find the container with id 6b622310af407b8e8e18176df9c2bd73700a5e3b26ef641eaf5ba47046a0a7d7
Nov 28 15:38:09 crc kubenswrapper[4884]: W1128 15:38:09.184664 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53444326_ebe6_45f5_a086_63ef03d1533a.slice/crio-745e25a8ec7a3b0d13e5ab9beb577a93f896b62d613e024214de84cacb0fd596 WatchSource:0}: Error finding container 745e25a8ec7a3b0d13e5ab9beb577a93f896b62d613e024214de84cacb0fd596: Status 404 returned error can't find the container with id 745e25a8ec7a3b0d13e5ab9beb577a93f896b62d613e024214de84cacb0fd596
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.190412 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z7qsz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-695797c565-zfmh2_openstack-operators(53444326-ebe6-45f5-a086-63ef03d1533a): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.190481 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n8hm7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6b56b8849f-lsxjw_openstack-operators(ac6ec22d-b6e9-49c7-9c84-222e82ba75d2): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.239758 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"]
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj"] Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.254937 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"] Nov 28 15:38:09 crc kubenswrapper[4884]: E1128 15:38:09.267710 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:cu
rrent-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DE
FAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,
},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wz2pz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-77868f484-gt8db_openstack-operators(a96a807f-9e3f-426a-bd7c-b1c14db24baa): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.657325 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv"]
Nov 28 15:38:09 crc kubenswrapper[4884]: W1128 15:38:09.663274 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19969fcc_a2cf_4ed4_afd5_4d585f5dcb70.slice/crio-739ed1a4b101882d82d1f3ea46269451f60c7d102c17ec2af91d45f20678a8d2 WatchSource:0}: Error finding container 739ed1a4b101882d82d1f3ea46269451f60c7d102c17ec2af91d45f20678a8d2: Status 404 returned error can't find the container with id 739ed1a4b101882d82d1f3ea46269451f60c7d102c17ec2af91d45f20678a8d2
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.789906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" event={"ID":"ac6ec22d-b6e9-49c7-9c84-222e82ba75d2","Type":"ContainerStarted","Data":"6b622310af407b8e8e18176df9c2bd73700a5e3b26ef641eaf5ba47046a0a7d7"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.790760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" event={"ID":"6c2798c4-5cb6-4d34-9178-1d422470bbb1","Type":"ContainerStarted","Data":"2b33b42eb1e8a9a2bcdee85591793ccd086181503360896b617068ace945cb65"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.791611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" event={"ID":"20081e0d-0be1-46c4-a60a-49034215b26d","Type":"ContainerStarted","Data":"39c1d709e7c47c926d8a4a51265c43b344f2f9580c5834ea8131b5d2c938d46f"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.792666 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" event={"ID":"53444326-ebe6-45f5-a086-63ef03d1533a","Type":"ContainerStarted","Data":"745e25a8ec7a3b0d13e5ab9beb577a93f896b62d613e024214de84cacb0fd596"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.793643 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" event={"ID":"a96a807f-9e3f-426a-bd7c-b1c14db24baa","Type":"ContainerStarted","Data":"ecbbe8a299e9eab6f3c3137e1d8c4c8f0ba5a209f1b0a45db17687ae7b149367"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.795108 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" event={"ID":"c54a7e19-62d7-474b-90f2-0411cc2a1942","Type":"ContainerStarted","Data":"9e2e5a6aa0f8e0c5d76b6711100cd96a41644e90de7534ea6bd1b44d17cc63db"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.796157 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj" event={"ID":"ef2bba04-9b56-460b-8ad2-c5be6d08f79d","Type":"ContainerStarted","Data":"ab7ccb3a1bd18edc275b3c7cb7bd3e29fe6d8c382362ef2730cecfe737effc29"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.796954 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" event={"ID":"36d673e0-6b01-4296-829d-ce3c935876ad","Type":"ContainerStarted","Data":"36432dccfb48c44dce4efd203cc46174eb5314a01f588e19851d52773f4608b9"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.797751 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" event={"ID":"26e94d5b-3a3c-49d9-910f-4f339780ec2f","Type":"ContainerStarted","Data":"ad735b77a3d6849498df69592361f0cb5e919bfb152ada63f1c4de919513dba4"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.798782 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" event={"ID":"6fb9fd98-178d-4461-a090-ae0a6b35e258","Type":"ContainerStarted","Data":"6bea0425ae149beada7863440bdcbf029dfa6790a593f1b6a09cddf4eae5ddf0"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.799624 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" event={"ID":"7a96a060-30e3-4405-83cb-7a9b85d24c84","Type":"ContainerStarted","Data":"158b07c5acab896036153944d3148565c28c6a9edd82ad55a9fd14506aa38804"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.800448 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" event={"ID":"4e61efa8-cdc4-4974-ab4b-5f81e26cd439","Type":"ContainerStarted","Data":"77ef7eb4b523fdc2d928cd199a9576a47d1c8a28110d9fe3b02cfdc6dcd37075"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.801353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" event={"ID":"c4d80b0a-9e14-4a7a-8461-129c8cb07e9d","Type":"ContainerStarted","Data":"2a62aa9f156218c8ec419207c1a42027d7bc1c3eac1178b2bd606488b6936639"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.802293 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" event={"ID":"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70","Type":"ContainerStarted","Data":"739ed1a4b101882d82d1f3ea46269451f60c7d102c17ec2af91d45f20678a8d2"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.803245 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" event={"ID":"9831a921-a48d-4446-a5da-648cebb21936","Type":"ContainerStarted","Data":"bd343ea2e116b1ffd7d9068e10869618cb916973f3e255f9afe9e6fd0a2f1acc"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.808668 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" event={"ID":"eb06d6ab-34b7-4511-9680-6903fe6d50b7","Type":"ContainerStarted","Data":"3adcdc89e44c86af082208ad31091c0791b775be3160dc613d55ba084a5d8c9c"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.809675 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" event={"ID":"a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9","Type":"ContainerStarted","Data":"856a102e489d7381c92e6d42368763be9dc38c4620c157a2f47dddb2260f3766"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.810729 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" event={"ID":"8bcfffae-7352-42ae-9fe3-c0a6e85a9301","Type":"ContainerStarted","Data":"6d91fd73844b765d1349ee9e08e6a383eef58da534b8818854fc0bfcc639064b"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.811829 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" event={"ID":"262d6bdb-061f-406a-bd20-c0e17112188a","Type":"ContainerStarted","Data":"8b2b213f96096d1676fd9ad39494e8d1a74ab07543989668077d4602aa0092c2"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.812906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" event={"ID":"23a976c8-631a-4d07-a1a1-d44fdbac2e06","Type":"ContainerStarted","Data":"ba375344ee39d1339eda6a85ec767833b1d4a1abe84bb0bdf23a376b8fcf9ade"}
Nov 28 15:38:09 crc kubenswrapper[4884]: I1128 15:38:09.813774 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" event={"ID":"fca659fc-dce8-4284-a2a4-bbeb59993bcf","Type":"ContainerStarted","Data":"757a8810a3ae036c5161bbded05756c50e32822e74d8b4f0b7f660af6a3d4ffc"}
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.047061 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.064812 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe380e38-07d1-423d-8a43-51f2a0dc154f-cert\") pod \"openstack-operator-controller-manager-69699fdd55-xr4cn\" (UID: \"fe380e38-07d1-423d-8a43-51f2a0dc154f\") " pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.361306 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.573854 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" podUID="26e94d5b-3a3c-49d9-910f-4f339780ec2f"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.578254 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" podUID="53444326-ebe6-45f5-a086-63ef03d1533a"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.592497 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" podUID="36d673e0-6b01-4296-829d-ce3c935876ad"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.592627 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" podUID="6fb9fd98-178d-4461-a090-ae0a6b35e258"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.597451 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" podUID="a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.597603 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" podUID="ac6ec22d-b6e9-49c7-9c84-222e82ba75d2"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.601725 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" podUID="a96a807f-9e3f-426a-bd7c-b1c14db24baa"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.673218 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" podUID="7a96a060-30e3-4405-83cb-7a9b85d24c84"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.769451 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"]
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.823231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" event={"ID":"a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9","Type":"ContainerStarted","Data":"084cbeb91615b21f15b10ded2e6feda9eb0111f616eb962cd2210ca9cb7b9ef0"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.824766 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" podUID="a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.825231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" event={"ID":"fe380e38-07d1-423d-8a43-51f2a0dc154f","Type":"ContainerStarted","Data":"bcd31691e278449414378a0e7d44661f74313752458d48aafc1df09d49e09e04"}
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.826249 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" event={"ID":"a96a807f-9e3f-426a-bd7c-b1c14db24baa","Type":"ContainerStarted","Data":"de9621bb1b3b8b2053f6b4c567417b07ec074283c94cf09b51ae991f90d4c873"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.827158 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" podUID="a96a807f-9e3f-426a-bd7c-b1c14db24baa"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.840927 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" event={"ID":"36d673e0-6b01-4296-829d-ce3c935876ad","Type":"ContainerStarted","Data":"d8550b718f01e263105a49b4659d05618ccae40e82a05bc4e768e4de8fa3c118"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.842386 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" podUID="36d673e0-6b01-4296-829d-ce3c935876ad"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.865300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" event={"ID":"53444326-ebe6-45f5-a086-63ef03d1533a","Type":"ContainerStarted","Data":"1fee67975d203451a66f9cc3c4415d6e576e1be61191597a68f331ed4edc80ad"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.866614 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" podUID="53444326-ebe6-45f5-a086-63ef03d1533a"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.881065 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" event={"ID":"7a96a060-30e3-4405-83cb-7a9b85d24c84","Type":"ContainerStarted","Data":"d7b767312c9fb4bbc39ef3f7ab80d3441a22be3008959d967936f4ad6d970cc1"}
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.888379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" event={"ID":"ac6ec22d-b6e9-49c7-9c84-222e82ba75d2","Type":"ContainerStarted","Data":"211483c915e78ab05bd8caf7a0ff6539c63eab4961dceb5f4aa5284b1a4dcbb3"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.889998 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" podUID="7a96a060-30e3-4405-83cb-7a9b85d24c84"
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.890050 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" podUID="ac6ec22d-b6e9-49c7-9c84-222e82ba75d2"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.890518 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" event={"ID":"26e94d5b-3a3c-49d9-910f-4f339780ec2f","Type":"ContainerStarted","Data":"7db602235238db5690b096296344fea4e2e3e78a4a294b7fcac70be343b0a07d"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.894022 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:debe5d6d29a007374b270b0e114e69b2136eee61dabab8576baf4010c951edb9\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" podUID="26e94d5b-3a3c-49d9-910f-4f339780ec2f"
Nov 28 15:38:10 crc kubenswrapper[4884]: I1128 15:38:10.907255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" event={"ID":"6fb9fd98-178d-4461-a090-ae0a6b35e258","Type":"ContainerStarted","Data":"438290b465c7418cd9fd83e68a9c862f4eda2e0f11d14cc19f68ec25272d6ce2"}
Nov 28 15:38:10 crc kubenswrapper[4884]: E1128 15:38:10.909037 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" podUID="6fb9fd98-178d-4461-a090-ae0a6b35e258"
Nov 28 15:38:11 crc kubenswrapper[4884]: I1128 15:38:11.919512 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" event={"ID":"fe380e38-07d1-423d-8a43-51f2a0dc154f","Type":"ContainerStarted","Data":"22aa8bb2fcafe75e7fcc8dad05b3009ffc44cbb04bf66f119e4aa25e2ef7dd43"}
Nov 28 15:38:11 crc kubenswrapper[4884]: I1128 15:38:11.919549 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" event={"ID":"fe380e38-07d1-423d-8a43-51f2a0dc154f","Type":"ContainerStarted","Data":"c18256509dbf806452d8f24a5d006a294aa5b8022543bde96eff1c8ed9651cff"}
Nov 28 15:38:11 crc kubenswrapper[4884]: I1128 15:38:11.919564 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.924730 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:1988aaf9cd245150cda123aaaa21718ccb552c47f1623b7d68804f13c47f2c6a\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" podUID="ac6ec22d-b6e9-49c7-9c84-222e82ba75d2"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.925205 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:debe5d6d29a007374b270b0e114e69b2136eee61dabab8576baf4010c951edb9\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" podUID="26e94d5b-3a3c-49d9-910f-4f339780ec2f"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.925717 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:49180c7bd4f0071e43ae7044260a3a97c4aa34fcbcb2d0d4573df449765ed391\\\"\"" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" podUID="7a96a060-30e3-4405-83cb-7a9b85d24c84"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.925796 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:66928f0eae5206f671ac7b21f79953e37009c54187d768dc6e03fe0a3d202b3b\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" podUID="a96a807f-9e3f-426a-bd7c-b1c14db24baa"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.925845 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:2c837009de6475bc22534827c03df6d8649277b71f1c30de2087b6c52aafb326\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" podUID="a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.926151 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:78d91c3cdd5eda41c2cd6d4a8491844e161dc33f6221be8cb822b2107d7ff46f\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" podUID="53444326-ebe6-45f5-a086-63ef03d1533a"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.926214 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:f076b8d9e85881d9c3cb5272b13db7f5e05d2e9da884c17b677a844112831907\\\"\"" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" podUID="6fb9fd98-178d-4461-a090-ae0a6b35e258"
Nov 28 15:38:11 crc kubenswrapper[4884]: E1128 15:38:11.927083 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:4f799c74da2f1c864af24fcd5efd91ec64848972a95246eac6b5c6c4d71c1756\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" podUID="36d673e0-6b01-4296-829d-ce3c935876ad"
Nov 28 15:38:12 crc kubenswrapper[4884]: I1128 15:38:12.035869 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn" podStartSLOduration=5.035855221 podStartE2EDuration="5.035855221s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:38:12.031230486 +0000 UTC m=+1131.594014297" watchObservedRunningTime="2025-11-28 15:38:12.035855221 +0000 UTC m=+1131.598639022"
Nov 28 15:38:20 crc kubenswrapper[4884]: I1128 15:38:20.368934 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-69699fdd55-xr4cn"
Nov 28 15:38:21 crc kubenswrapper[4884]: I1128 15:38:21.243072 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:38:21 crc kubenswrapper[4884]: I1128 15:38:21.243178 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:38:22 crc kubenswrapper[4884]: E1128 15:38:22.623149 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57"
Nov 28 15:38:22 crc kubenswrapper[4884]: E1128 15:38:22.623549 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d87cd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-698d6fd7d6-4k5mj_openstack-operators(fca659fc-dce8-4284-a2a4-bbeb59993bcf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:23 crc kubenswrapper[4884]: E1128 15:38:23.233791 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c"
Nov 28 15:38:23 crc kubenswrapper[4884]: E1128 15:38:23.233991 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pv99l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-54485f899-5xhvn_openstack-operators(c4d80b0a-9e14-4a7a-8461-129c8cb07e9d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:23 crc kubenswrapper[4884]: E1128 15:38:23.973271 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d"
Nov 28 15:38:23 crc kubenswrapper[4884]: E1128 15:38:23.973851 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ckgq7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-867d87977b-zcwcr_openstack-operators(23a976c8-631a-4d07-a1a1-d44fdbac2e06): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:24 crc kubenswrapper[4884]: E1128 15:38:24.502566 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4"
Nov 28 15:38:24 crc kubenswrapper[4884]: E1128 15:38:24.502713 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-llsmm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6c55d8d69b-5qkwv_openstack-operators(19969fcc-a2cf-4ed4-afd5-4d585f5dcb70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:25 crc kubenswrapper[4884]: E1128 15:38:25.003798 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:f4b6baa2b8a661351cfc24fff5aacee5aa4198106618700cfa47ec3a75f88b31"
Nov 28 15:38:25 crc kubenswrapper[4884]: E1128 15:38:25.004133 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:f4b6baa2b8a661351cfc24fff5aacee5aa4198106618700cfa47ec3a75f88b31,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bhlmx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-85fbd69fcd-ddjbx_openstack-operators(9831a921-a48d-4446-a5da-648cebb21936): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:25 crc kubenswrapper[4884]: E1128 15:38:25.417483 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8"
Nov 28 15:38:25 crc kubenswrapper[4884]: E1128 15:38:25.417966 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v8ckm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5cbc8c7f96-qbh5l_openstack-operators(8bcfffae-7352-42ae-9fe3-c0a6e85a9301): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 15:38:28 crc kubenswrapper[4884]: E1128 15:38:28.692702 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" podUID="fca659fc-dce8-4284-a2a4-bbeb59993bcf"
Nov 28 15:38:28 crc kubenswrapper[4884]: E1128 15:38:28.848550 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" podUID="8bcfffae-7352-42ae-9fe3-c0a6e85a9301"
Nov 28 15:38:28 crc kubenswrapper[4884]: E1128 15:38:28.853714 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" podUID="19969fcc-a2cf-4ed4-afd5-4d585f5dcb70"
Nov 28 15:38:28 crc kubenswrapper[4884]: E1128 15:38:28.864277 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" podUID="9831a921-a48d-4446-a5da-648cebb21936"
Nov 28 15:38:28 crc kubenswrapper[4884]: E1128 15:38:28.864378 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" podUID="23a976c8-631a-4d07-a1a1-d44fdbac2e06"
Nov 28 15:38:28 crc kubenswrapper[4884]: E1128 15:38:28.864995 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" podUID="c4d80b0a-9e14-4a7a-8461-129c8cb07e9d"
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.478763 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" event={"ID":"fca659fc-dce8-4284-a2a4-bbeb59993bcf","Type":"ContainerStarted","Data":"b8f2e74dbba4f5b883ef6b802ae11ba4a0cf53d7b00ea8454df497df6ae913a4"}
Nov 28 15:38:29 crc kubenswrapper[4884]: E1128 15:38:29.487059 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57\\\"\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" podUID="fca659fc-dce8-4284-a2a4-bbeb59993bcf"
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.491798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" event={"ID":"8bcfffae-7352-42ae-9fe3-c0a6e85a9301","Type":"ContainerStarted","Data":"0474cb2de83ff72c65574ebc9284c4b75e4134d0e0b54d6c326090ad653e3bd9"}
Nov 28 15:38:29 crc kubenswrapper[4884]: E1128 15:38:29.493037 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" podUID="8bcfffae-7352-42ae-9fe3-c0a6e85a9301"
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.494450 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" event={"ID":"c4d80b0a-9e14-4a7a-8461-129c8cb07e9d","Type":"ContainerStarted","Data":"fb5c255bae191166e395004dda91ac25fc9e95a774aa3f56bce00cf329cd415a"}
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.496705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" event={"ID":"23a976c8-631a-4d07-a1a1-d44fdbac2e06","Type":"ContainerStarted","Data":"a9a392d42d39fa126665d4daeb7aa176b1f95cc4e6a36ea1dea08b14df44704c"}
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.498634 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" event={"ID":"ef79df92-f12e-4606-8b3d-27b23a1bc3c7","Type":"ContainerStarted","Data":"775178eef83f86481fb90560b79bf51c6308d2b055e0debd1632925908482cf5"}
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.499932 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" event={"ID":"6c2798c4-5cb6-4d34-9178-1d422470bbb1","Type":"ContainerStarted","Data":"f768fcf258e8010fd8e2e6fdc795f21e8ddd0885c3f2b017100eb3e3a5727445"}
Nov 28 15:38:29 crc kubenswrapper[4884]: E1128 15:38:29.505222 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" podUID="c4d80b0a-9e14-4a7a-8461-129c8cb07e9d"
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.506123 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" event={"ID":"9831a921-a48d-4446-a5da-648cebb21936","Type":"ContainerStarted","Data":"fe22a09fbc87288f55de270bdd4762afbf3de77e514670f66d138efd0e5b630f"}
Nov 28 15:38:29 crc kubenswrapper[4884]: I1128 15:38:29.508237 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" event={"ID":"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70","Type":"ContainerStarted","Data":"609957dcd72c0e9d29599392757f1ea8b620230d86786f2912c72eaf9bc424e7"}
Nov 28 15:38:29 crc kubenswrapper[4884]: E1128 15:38:29.517192 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:f4b6baa2b8a661351cfc24fff5aacee5aa4198106618700cfa47ec3a75f88b31\\\"\"" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" podUID="9831a921-a48d-4446-a5da-648cebb21936"
Nov 28 15:38:29 crc kubenswrapper[4884]: E1128 15:38:29.517276 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" podUID="23a976c8-631a-4d07-a1a1-d44fdbac2e06"
Nov 28 15:38:29 crc kubenswrapper[4884]: E1128 15:38:29.540287 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" podUID="19969fcc-a2cf-4ed4-afd5-4d585f5dcb70"
Nov 28 15:38:30 crc kubenswrapper[4884]: I1128 15:38:30.518760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" event={"ID":"20081e0d-0be1-46c4-a60a-49034215b26d","Type":"ContainerStarted","Data":"1c2f18bafed84c83184c632b96b7bef569288cbc53d43edf91fba6f5ab72a77f"}
Nov 28 15:38:30 crc kubenswrapper[4884]: I1128 15:38:30.520928 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" event={"ID":"4e61efa8-cdc4-4974-ab4b-5f81e26cd439","Type":"ContainerStarted","Data":"86c497849bbb026e7c44c38bfa11ef60d91f03022c7d0cbd773d93effc8b3541"}
Nov 28 15:38:30 crc kubenswrapper[4884]: I1128 15:38:30.523044 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" event={"ID":"262d6bdb-061f-406a-bd20-c0e17112188a","Type":"ContainerStarted","Data":"962ccf6b71d62b2705c9bccee4c89abe867a2e4499ff6b6b41694e41c1ee6ebe"}
Nov 28 15:38:30 crc kubenswrapper[4884]: I1128 15:38:30.525566 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" event={"ID":"eb06d6ab-34b7-4511-9680-6903fe6d50b7","Type":"ContainerStarted","Data":"79df25db7aafcd02fa513fd4d917f40c74cb0b995a5e56626f237d0ac378247b"}
Nov 28 15:38:30 crc kubenswrapper[4884]: E1128 15:38:30.537039 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:57d9cb0034a7d5c7a39410fcb619ade2010e6855344dc3a0bc2bfd98cdf345d8\\\"\"" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" podUID="8bcfffae-7352-42ae-9fe3-c0a6e85a9301"
Nov 28 15:38:30 crc kubenswrapper[4884]: E1128 15:38:30.537690 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:f4b6baa2b8a661351cfc24fff5aacee5aa4198106618700cfa47ec3a75f88b31\\\"\"" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" podUID="9831a921-a48d-4446-a5da-648cebb21936"
Nov 28 15:38:30 crc kubenswrapper[4884]: E1128 15:38:30.537787 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:9aee58b2ca71ef9c4f12373090951090d13aa7038d0fef07ec30167f3d6ae23c\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" podUID="c4d80b0a-9e14-4a7a-8461-129c8cb07e9d"
Nov 28 15:38:30 crc kubenswrapper[4884]: E1128 15:38:30.537860 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:1739eeeb2c05142ddf835739758ffd04ad06cad353125e2ceff687f237ecda57\\\"\"" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" podUID="fca659fc-dce8-4284-a2a4-bbeb59993bcf"
Nov 28 15:38:30 crc kubenswrapper[4884]: E1128 15:38:30.537928 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:fd917de0cf800ec284ee0c3f2906a06d85ea18cb75a5b06c8eb305750467986d\\\"\"" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" podUID="23a976c8-631a-4d07-a1a1-d44fdbac2e06"
Nov 28 15:38:30 crc kubenswrapper[4884]: E1128 15:38:30.537992 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:e1a731922a2da70b224ce5396602a07cec2b4a79efe7bcdc17c5e4509d16b5e4\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" podUID="19969fcc-a2cf-4ed4-afd5-4d585f5dcb70"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.541697 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" event={"ID":"ef79df92-f12e-4606-8b3d-27b23a1bc3c7","Type":"ContainerStarted","Data":"fb9eaf745d6db73cb342bc3ac99142bb75084650520b9ca8a94ca776af29025a"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.542194 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.545556 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" event={"ID":"20081e0d-0be1-46c4-a60a-49034215b26d","Type":"ContainerStarted","Data":"8cf42d1474b76442b205c4cb6fc8969e131d26cfa6c2e4d0f9e6158df7cc3584"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.545665 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.547548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" event={"ID":"c54a7e19-62d7-474b-90f2-0411cc2a1942","Type":"ContainerStarted","Data":"d6195dee84ecc7f45b20d03dbb538442d4c93b6fece0d52f44bfa4df5a9002be"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.547596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" event={"ID":"c54a7e19-62d7-474b-90f2-0411cc2a1942","Type":"ContainerStarted","Data":"eb5ba63fc5b4e7baa57c05bc94a172a2c8adf96931a0f855242d7b7e13e4a5c1"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.547645 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.549325 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" event={"ID":"6fb9fd98-178d-4461-a090-ae0a6b35e258","Type":"ContainerStarted","Data":"7a3911f0cd9c3e9c7511516463cc4981542c3d300c2d89b6e7736a1d99f77dc2"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.549479 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.553050 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" event={"ID":"a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9","Type":"ContainerStarted","Data":"3435270b27e92d0ed58819b7cf66ea0bcc9b0dbc16669ac33f081cbe47bb2175"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.553537 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.554838 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" event={"ID":"262d6bdb-061f-406a-bd20-c0e17112188a","Type":"ContainerStarted","Data":"137d1ccca09aeaaaeb5a3f572482ab0d047b2fe3557abac86881024c1a31d778"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.555260 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.556770 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" event={"ID":"a96a807f-9e3f-426a-bd7c-b1c14db24baa","Type":"ContainerStarted","Data":"632a13adf31db76a0fc6b977ed49562cd0866296f1e47101d5ca02d734370d2b"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.557447 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.559147 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" event={"ID":"53444326-ebe6-45f5-a086-63ef03d1533a","Type":"ContainerStarted","Data":"bcc5cbe600a930740b982c990520493143898798ffce929384450e14da4a96a5"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.559282 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.560684 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" event={"ID":"ac6ec22d-b6e9-49c7-9c84-222e82ba75d2","Type":"ContainerStarted","Data":"1d5002aa3b273231ff35d56a3672effe3aced898b00b0d8846104ca0c473a0c3"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.560881 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.562015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj" event={"ID":"ef2bba04-9b56-460b-8ad2-c5be6d08f79d","Type":"ContainerStarted","Data":"369d1aec57963ca241b92836a969608f02747f7cc631545b7f523a02f163cd20"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.563916 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" event={"ID":"26e94d5b-3a3c-49d9-910f-4f339780ec2f","Type":"ContainerStarted","Data":"f93182d10b4ed3c0f057cc8b3e2f0e3332dea6828555be730e939586de17423c"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.564105 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.565191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" event={"ID":"4e61efa8-cdc4-4974-ab4b-5f81e26cd439","Type":"ContainerStarted","Data":"791a6e9f61f674729508af7eff00e85bc1f8eb44cc7b930bd4b00755ddc21b88"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.565550 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.566760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" event={"ID":"7a96a060-30e3-4405-83cb-7a9b85d24c84","Type":"ContainerStarted","Data":"0c503ffb196d42e6be283912bf2440cd4c3186c728d3ba2b60ff3bcb99840191"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.567111 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.568407 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" event={"ID":"6c2798c4-5cb6-4d34-9178-1d422470bbb1","Type":"ContainerStarted","Data":"f2b972ce07605fcc1490ecedb42b25f5393d20584fe2e24e37069029da78b054"}
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.568883 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r"
Nov 28 15:38:32 crc kubenswrapper[4884]: I1128
15:38:32.570130 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" podStartSLOduration=8.135103122 podStartE2EDuration="25.5701147s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.445064477 +0000 UTC m=+1128.007848278" lastFinishedPulling="2025-11-28 15:38:25.880076055 +0000 UTC m=+1145.442859856" observedRunningTime="2025-11-28 15:38:32.567901104 +0000 UTC m=+1152.130684925" watchObservedRunningTime="2025-11-28 15:38:32.5701147 +0000 UTC m=+1152.132898501" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.570670 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" event={"ID":"eb06d6ab-34b7-4511-9680-6903fe6d50b7","Type":"ContainerStarted","Data":"8f01af213ffa0c48a3ee11f4995b15d17869a48957a9e24d3f9f0b7866283ad2"} Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.571207 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.572716 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" event={"ID":"36d673e0-6b01-4296-829d-ce3c935876ad","Type":"ContainerStarted","Data":"fac5c81795182e5d3db738b2a73ec47107e5d0d841e5d3091143a10a058fd177"} Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.573273 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.607637 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" podStartSLOduration=3.204321701 podStartE2EDuration="25.607620195s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.079781133 +0000 UTC m=+1128.642564944" lastFinishedPulling="2025-11-28 15:38:31.483079627 +0000 UTC m=+1151.045863438" observedRunningTime="2025-11-28 15:38:32.603052312 +0000 UTC m=+1152.165836103" watchObservedRunningTime="2025-11-28 15:38:32.607620195 +0000 UTC m=+1152.170403996" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.628003 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" podStartSLOduration=6.365365167 podStartE2EDuration="25.627985418s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.267383574 +0000 UTC m=+1128.830167375" lastFinishedPulling="2025-11-28 15:38:28.530003805 +0000 UTC m=+1148.092787626" observedRunningTime="2025-11-28 15:38:32.626152153 +0000 UTC m=+1152.188935964" watchObservedRunningTime="2025-11-28 15:38:32.627985418 +0000 UTC m=+1152.190769219" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.653499 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" podStartSLOduration=8.640166238 podStartE2EDuration="25.653478127s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.866760876 +0000 UTC m=+1128.429544677" lastFinishedPulling="2025-11-28 15:38:25.880072765 +0000 UTC m=+1145.442856566" 
observedRunningTime="2025-11-28 15:38:32.652032191 +0000 UTC m=+1152.214815992" watchObservedRunningTime="2025-11-28 15:38:32.653478127 +0000 UTC m=+1152.216261928" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.672288 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj" podStartSLOduration=8.051499552 podStartE2EDuration="24.672271661s" podCreationTimestamp="2025-11-28 15:38:08 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.260516586 +0000 UTC m=+1128.823300387" lastFinishedPulling="2025-11-28 15:38:25.881288695 +0000 UTC m=+1145.444072496" observedRunningTime="2025-11-28 15:38:32.668653592 +0000 UTC m=+1152.231437383" watchObservedRunningTime="2025-11-28 15:38:32.672271661 +0000 UTC m=+1152.235055462" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.728062 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" podStartSLOduration=6.758111491 podStartE2EDuration="25.728042147s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.063683957 +0000 UTC m=+1128.626467758" lastFinishedPulling="2025-11-28 15:38:28.033614603 +0000 UTC m=+1147.596398414" observedRunningTime="2025-11-28 15:38:32.697009851 +0000 UTC m=+1152.259793662" watchObservedRunningTime="2025-11-28 15:38:32.728042147 +0000 UTC m=+1152.290825948" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.728444 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" podStartSLOduration=8.761375920999999 podStartE2EDuration="25.728437978s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.914865284 +0000 UTC m=+1128.477649085" lastFinishedPulling="2025-11-28 15:38:25.881927341 +0000 UTC m=+1145.444711142" observedRunningTime="2025-11-28 15:38:32.722763808 +0000 UTC m=+1152.285547609" watchObservedRunningTime="2025-11-28 15:38:32.728437978 +0000 UTC m=+1152.291221789" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.774351 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" podStartSLOduration=4.782373954 podStartE2EDuration="25.77433442s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.060939389 +0000 UTC m=+1128.623723190" lastFinishedPulling="2025-11-28 15:38:30.052899845 +0000 UTC m=+1149.615683656" observedRunningTime="2025-11-28 15:38:32.773887449 +0000 UTC m=+1152.336671250" watchObservedRunningTime="2025-11-28 15:38:32.77433442 +0000 UTC m=+1152.337118221" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.777708 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" podStartSLOduration=8.813905026 podStartE2EDuration="25.777701933s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.918220686 +0000 UTC m=+1128.481004487" lastFinishedPulling="2025-11-28 15:38:25.882017593 +0000 UTC m=+1145.444801394" observedRunningTime="2025-11-28 15:38:32.755983897 +0000 UTC m=+1152.318767698" watchObservedRunningTime="2025-11-28 15:38:32.777701933 +0000 UTC m=+1152.340485734" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.790647 4884 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" podStartSLOduration=6.496502073 podStartE2EDuration="25.790632292s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.190350993 +0000 UTC m=+1128.753134794" lastFinishedPulling="2025-11-28 15:38:28.484481212 +0000 UTC m=+1148.047265013" observedRunningTime="2025-11-28 15:38:32.788215082 +0000 UTC m=+1152.350998883" watchObservedRunningTime="2025-11-28 15:38:32.790632292 +0000 UTC m=+1152.353416093" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.825542 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" podStartSLOduration=6.802912008 podStartE2EDuration="25.825524204s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.062236021 +0000 UTC m=+1128.625019822" lastFinishedPulling="2025-11-28 15:38:28.084848217 +0000 UTC m=+1147.647632018" observedRunningTime="2025-11-28 15:38:32.81971113 +0000 UTC m=+1152.382494931" watchObservedRunningTime="2025-11-28 15:38:32.825524204 +0000 UTC m=+1152.388308005" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.837792 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" podStartSLOduration=6.5393963920000004 podStartE2EDuration="25.837775086s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.190283531 +0000 UTC m=+1128.753067332" lastFinishedPulling="2025-11-28 15:38:28.488662205 +0000 UTC m=+1148.051446026" observedRunningTime="2025-11-28 15:38:32.836588087 +0000 UTC m=+1152.399371888" watchObservedRunningTime="2025-11-28 15:38:32.837775086 +0000 UTC m=+1152.400558887" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.857704 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" podStartSLOduration=8.843393215 podStartE2EDuration="25.857683608s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.867642428 +0000 UTC m=+1128.430426229" lastFinishedPulling="2025-11-28 15:38:25.881932821 +0000 UTC m=+1145.444716622" observedRunningTime="2025-11-28 15:38:32.853908694 +0000 UTC m=+1152.416692495" watchObservedRunningTime="2025-11-28 15:38:32.857683608 +0000 UTC m=+1152.420467409" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.882049 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" podStartSLOduration=9.051832418 podStartE2EDuration="25.882033258s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.05169447 +0000 UTC m=+1128.614478271" lastFinishedPulling="2025-11-28 15:38:25.88189531 +0000 UTC m=+1145.444679111" observedRunningTime="2025-11-28 15:38:32.876958294 +0000 UTC m=+1152.439742095" watchObservedRunningTime="2025-11-28 15:38:32.882033258 +0000 UTC m=+1152.444817059" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.902193 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" podStartSLOduration=8.845980539 podStartE2EDuration="25.902176606s" 
podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.825312923 +0000 UTC m=+1128.388096724" lastFinishedPulling="2025-11-28 15:38:25.881509 +0000 UTC m=+1145.444292791" observedRunningTime="2025-11-28 15:38:32.901715294 +0000 UTC m=+1152.464499095" watchObservedRunningTime="2025-11-28 15:38:32.902176606 +0000 UTC m=+1152.464960407" Nov 28 15:38:32 crc kubenswrapper[4884]: I1128 15:38:32.928544 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" podStartSLOduration=3.423959764 podStartE2EDuration="25.928528367s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.023121915 +0000 UTC m=+1128.585905736" lastFinishedPulling="2025-11-28 15:38:31.527690538 +0000 UTC m=+1151.090474339" observedRunningTime="2025-11-28 15:38:32.923441421 +0000 UTC m=+1152.486225232" watchObservedRunningTime="2025-11-28 15:38:32.928528367 +0000 UTC m=+1152.491312168" Nov 28 15:38:33 crc kubenswrapper[4884]: I1128 15:38:33.585538 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-7d5d9fd47f-67f2r" Nov 28 15:38:33 crc kubenswrapper[4884]: I1128 15:38:33.585986 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-748967c98-j56b4" Nov 28 15:38:37 crc kubenswrapper[4884]: I1128 15:38:37.562556 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-5bfbbb859d-xplsn" Nov 28 15:38:37 crc kubenswrapper[4884]: I1128 15:38:37.591457 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6788cc6d75-5z54f" Nov 28 15:38:37 crc kubenswrapper[4884]: I1128 15:38:37.766714 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-79cc9d59f5-zd6gg" Nov 28 15:38:37 crc kubenswrapper[4884]: I1128 15:38:37.837885 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-64d7c556cd-hrrh7" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.037613 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-58879495c-lkq65" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.040646 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79d658b66d-422zp" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.108892 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5b67cfc8fb-89jvx" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.268797 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-8f6687c44-5wqmv" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.281233 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.302895 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/octavia-operator-controller-manager-d5fb87cb8-srsqz" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.312485 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-bb86466d8-5g9m9" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.404548 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b56b8849f-lsxjw" Nov 28 15:38:38 crc kubenswrapper[4884]: I1128 15:38:38.699387 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-77868f484-gt8db" Nov 28 15:38:42 crc kubenswrapper[4884]: I1128 15:38:42.690978 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:38:51 crc kubenswrapper[4884]: I1128 15:38:51.242696 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:38:51 crc kubenswrapper[4884]: I1128 15:38:51.243531 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.742594 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" event={"ID":"23a976c8-631a-4d07-a1a1-d44fdbac2e06","Type":"ContainerStarted","Data":"6a936373559c05447a338757823ca1d8c8ad06bda758724cb4f701dd28182948"} Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.743281 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.745912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" event={"ID":"9831a921-a48d-4446-a5da-648cebb21936","Type":"ContainerStarted","Data":"d4770dd795d8df8ba373a42b522a4d0d6a668372fa0884091d5ad782a8e1cbc0"} Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.746131 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.748141 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" event={"ID":"19969fcc-a2cf-4ed4-afd5-4d585f5dcb70","Type":"ContainerStarted","Data":"5e2973c0915a8347a9d5bf19d1adc114307dee03c43c124533eeda1da5eef017"} Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.748427 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.749966 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" 
event={"ID":"fca659fc-dce8-4284-a2a4-bbeb59993bcf","Type":"ContainerStarted","Data":"11e9f452c649a184651a067dfe70ec85cb7f6506fb763e93dcf1d94612806d31"} Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.750215 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.778994 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" event={"ID":"8bcfffae-7352-42ae-9fe3-c0a6e85a9301","Type":"ContainerStarted","Data":"e312ea7a8ce8aa406cb591480d2c3f0c530bc3cf62042c0811c6b59ca946feae"} Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.780315 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.801815 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" podStartSLOduration=2.6295384840000002 podStartE2EDuration="48.801794499s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.022267544 +0000 UTC m=+1128.585051355" lastFinishedPulling="2025-11-28 15:38:55.194523569 +0000 UTC m=+1174.757307370" observedRunningTime="2025-11-28 15:38:55.776001223 +0000 UTC m=+1175.338785054" watchObservedRunningTime="2025-11-28 15:38:55.801794499 +0000 UTC m=+1175.364578300" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.802945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" event={"ID":"c4d80b0a-9e14-4a7a-8461-129c8cb07e9d","Type":"ContainerStarted","Data":"35556bf211c47bbbbba474d69c5376d433eba28d92995b7aed558ffe9feeb925"} Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.803174 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.812894 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" podStartSLOduration=3.287047844 podStartE2EDuration="48.812878673s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:09.665182314 +0000 UTC m=+1129.227966115" lastFinishedPulling="2025-11-28 15:38:55.191013113 +0000 UTC m=+1174.753796944" observedRunningTime="2025-11-28 15:38:55.811222352 +0000 UTC m=+1175.374006163" watchObservedRunningTime="2025-11-28 15:38:55.812878673 +0000 UTC m=+1175.375662474" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.833801 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" podStartSLOduration=2.492816259 podStartE2EDuration="48.833780549s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.850073584 +0000 UTC m=+1128.412857385" lastFinishedPulling="2025-11-28 15:38:55.191037874 +0000 UTC m=+1174.753821675" observedRunningTime="2025-11-28 15:38:55.833455671 +0000 UTC m=+1175.396239492" watchObservedRunningTime="2025-11-28 15:38:55.833780549 +0000 UTC m=+1175.396564350" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.857068 4884 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" podStartSLOduration=2.566022876 podStartE2EDuration="48.857045783s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.898918289 +0000 UTC m=+1128.461702080" lastFinishedPulling="2025-11-28 15:38:55.189941186 +0000 UTC m=+1174.752724987" observedRunningTime="2025-11-28 15:38:55.850405869 +0000 UTC m=+1175.413189680" watchObservedRunningTime="2025-11-28 15:38:55.857045783 +0000 UTC m=+1175.419829584" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.876222 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" podStartSLOduration=2.582508164 podStartE2EDuration="48.876201626s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.898300585 +0000 UTC m=+1128.461084386" lastFinishedPulling="2025-11-28 15:38:55.191994047 +0000 UTC m=+1174.754777848" observedRunningTime="2025-11-28 15:38:55.872193167 +0000 UTC m=+1175.434976968" watchObservedRunningTime="2025-11-28 15:38:55.876201626 +0000 UTC m=+1175.438985427" Nov 28 15:38:55 crc kubenswrapper[4884]: I1128 15:38:55.892210 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" podStartSLOduration=2.568460567 podStartE2EDuration="48.892192741s" podCreationTimestamp="2025-11-28 15:38:07 +0000 UTC" firstStartedPulling="2025-11-28 15:38:08.86731111 +0000 UTC m=+1128.430094911" lastFinishedPulling="2025-11-28 15:38:55.191043254 +0000 UTC m=+1174.753827085" observedRunningTime="2025-11-28 15:38:55.890796756 +0000 UTC m=+1175.453580568" watchObservedRunningTime="2025-11-28 15:38:55.892192741 +0000 UTC m=+1175.454976542" Nov 28 15:39:07 crc kubenswrapper[4884]: I1128 15:39:07.566647 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-85fbd69fcd-ddjbx" Nov 28 15:39:07 crc kubenswrapper[4884]: I1128 15:39:07.624365 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-698d6fd7d6-4k5mj" Nov 28 15:39:07 crc kubenswrapper[4884]: I1128 15:39:07.753778 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-54485f899-5xhvn" Nov 28 15:39:07 crc kubenswrapper[4884]: I1128 15:39:07.788872 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5cbc8c7f96-qbh5l" Nov 28 15:39:08 crc kubenswrapper[4884]: I1128 15:39:08.157229 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-867d87977b-zcwcr" Nov 28 15:39:09 crc kubenswrapper[4884]: I1128 15:39:09.185887 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6c55d8d69b-5qkwv" Nov 28 15:39:21 crc kubenswrapper[4884]: I1128 15:39:21.242842 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 
28 15:39:21 crc kubenswrapper[4884]: I1128 15:39:21.243620 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:39:21 crc kubenswrapper[4884]: I1128 15:39:21.243763 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:39:21 crc kubenswrapper[4884]: I1128 15:39:21.244912 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c832312ab3e30450a9cf06ba49e8c224ee46755e5134c05566d8c182c4c44cfc"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:39:21 crc kubenswrapper[4884]: I1128 15:39:21.245025 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://c832312ab3e30450a9cf06ba49e8c224ee46755e5134c05566d8c182c4c44cfc" gracePeriod=600 Nov 28 15:39:22 crc kubenswrapper[4884]: I1128 15:39:22.047955 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="c832312ab3e30450a9cf06ba49e8c224ee46755e5134c05566d8c182c4c44cfc" exitCode=0 Nov 28 15:39:22 crc kubenswrapper[4884]: I1128 15:39:22.048528 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"c832312ab3e30450a9cf06ba49e8c224ee46755e5134c05566d8c182c4c44cfc"} Nov 28 15:39:22 crc kubenswrapper[4884]: I1128 15:39:22.048614 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"93d6c6fb2978a8eb8e8488bd25799dd7994dbdfad1cd1ea19ac8f2b6e6f9e8d0"} Nov 28 15:39:22 crc kubenswrapper[4884]: I1128 15:39:22.048637 4884 scope.go:117] "RemoveContainer" containerID="df509a7285ba0f4de67851edf0f5010eb933a0baadb76ebfdc9cd205fbf9037b" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.143712 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g2cpk"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.145192 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.155353 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g2cpk"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.155403 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.155469 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-xf2v7" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.155583 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.155691 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.201514 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.203522 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.206468 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.218318 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.272784 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvjqb\" (UniqueName: \"kubernetes.io/projected/6f42f94f-f913-4b71-ade4-7c228064a21e-kube-api-access-wvjqb\") pod \"dnsmasq-dns-675f4bcbfc-g2cpk\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.273154 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f42f94f-f913-4b71-ade4-7c228064a21e-config\") pod \"dnsmasq-dns-675f4bcbfc-g2cpk\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.374707 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.374754 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvjqb\" (UniqueName: \"kubernetes.io/projected/6f42f94f-f913-4b71-ade4-7c228064a21e-kube-api-access-wvjqb\") pod \"dnsmasq-dns-675f4bcbfc-g2cpk\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.374780 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbxfk\" (UniqueName: \"kubernetes.io/projected/156eac68-0e44-450f-bdcd-ba13600231f6-kube-api-access-rbxfk\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " 
pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.374800 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f42f94f-f913-4b71-ade4-7c228064a21e-config\") pod \"dnsmasq-dns-675f4bcbfc-g2cpk\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.375116 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-config\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.375783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f42f94f-f913-4b71-ade4-7c228064a21e-config\") pod \"dnsmasq-dns-675f4bcbfc-g2cpk\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.395238 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvjqb\" (UniqueName: \"kubernetes.io/projected/6f42f94f-f913-4b71-ade4-7c228064a21e-kube-api-access-wvjqb\") pod \"dnsmasq-dns-675f4bcbfc-g2cpk\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.464998 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.476603 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.476900 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbxfk\" (UniqueName: \"kubernetes.io/projected/156eac68-0e44-450f-bdcd-ba13600231f6-kube-api-access-rbxfk\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.477076 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-config\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.477558 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.478191 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-config\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: 
\"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.499693 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbxfk\" (UniqueName: \"kubernetes.io/projected/156eac68-0e44-450f-bdcd-ba13600231f6-kube-api-access-rbxfk\") pod \"dnsmasq-dns-78dd6ddcc-8dmvk\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") " pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.520210 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.731312 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g2cpk"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.757747 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5r2db"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.759192 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.783306 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5r2db"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.882801 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.882862 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-config\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.882891 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm66q\" (UniqueName: \"kubernetes.io/projected/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-kube-api-access-hm66q\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.935734 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g2cpk"] Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.983831 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.983883 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-config\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.983909 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hm66q\" (UniqueName: \"kubernetes.io/projected/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-kube-api-access-hm66q\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.984786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-config\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.985173 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-dns-svc\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:27 crc kubenswrapper[4884]: I1128 15:39:27.999537 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm66q\" (UniqueName: \"kubernetes.io/projected/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-kube-api-access-hm66q\") pod \"dnsmasq-dns-666b6646f7-5r2db\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.056923 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"] Nov 28 15:39:28 crc kubenswrapper[4884]: W1128 15:39:28.059376 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod156eac68_0e44_450f_bdcd_ba13600231f6.slice/crio-3a33e6b6ebc476adef9ffc94d91c1404b3931aa4c269d2600140827ef55f2c08 WatchSource:0}: Error finding container 3a33e6b6ebc476adef9ffc94d91c1404b3931aa4c269d2600140827ef55f2c08: Status 404 returned error can't find the container with id 3a33e6b6ebc476adef9ffc94d91c1404b3931aa4c269d2600140827ef55f2c08 Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.090650 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.096868 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" event={"ID":"6f42f94f-f913-4b71-ade4-7c228064a21e","Type":"ContainerStarted","Data":"0d9ab7f5713862e22f17e2af1ab0b483dd07726850f1e72dc8905b96cd34a8ce"} Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.098548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" event={"ID":"156eac68-0e44-450f-bdcd-ba13600231f6","Type":"ContainerStarted","Data":"3a33e6b6ebc476adef9ffc94d91c1404b3931aa4c269d2600140827ef55f2c08"} Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.438459 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5r2db"] Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.459313 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6gzfb"] Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.460763 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.468427 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6gzfb"] Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.506524 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5r2db"] Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.591595 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.591659 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c2br\" (UniqueName: \"kubernetes.io/projected/e830a879-0bc6-424d-8d35-e3ea1be1590c-kube-api-access-7c2br\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.591734 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-config\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.698171 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-config\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.699203 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-config\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.699431 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.699570 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c2br\" (UniqueName: \"kubernetes.io/projected/e830a879-0bc6-424d-8d35-e3ea1be1590c-kube-api-access-7c2br\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.700156 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.735847 
4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c2br\" (UniqueName: \"kubernetes.io/projected/e830a879-0bc6-424d-8d35-e3ea1be1590c-kube-api-access-7c2br\") pod \"dnsmasq-dns-57d769cc4f-6gzfb\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") " pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.778965 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.891205 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.896422 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.901238 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.901344 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-kwd9z" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.901470 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.901862 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.901916 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.901948 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.902024 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 15:39:28 crc kubenswrapper[4884]: I1128 15:39:28.902535 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.003472 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.003876 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a5d81bd-3b99-4aa6-82dc-2969295dce39-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.003962 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/3a5d81bd-3b99-4aa6-82dc-2969295dce39-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004020 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004039 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004055 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004088 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004132 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.004152 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st6wl\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-kube-api-access-st6wl\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105804 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105854 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st6wl\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-kube-api-access-st6wl\") pod 
\"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105893 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105926 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a5d81bd-3b99-4aa6-82dc-2969295dce39-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105950 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105979 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a5d81bd-3b99-4aa6-82dc-2969295dce39-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.105997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.106018 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.106038 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.106052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.106067 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.107280 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.108900 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.110814 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.111033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.111069 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" event={"ID":"9de8b98b-fa0a-43a6-aad4-de8f12035d7a","Type":"ContainerStarted","Data":"f41bec43da677e3579431109434694b3e9282306955dd34b65631fcac839f538"} Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.111168 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.111857 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.112119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a5d81bd-3b99-4aa6-82dc-2969295dce39-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.113475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.114476 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a5d81bd-3b99-4aa6-82dc-2969295dce39-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.118893 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.129515 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.139277 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st6wl\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-kube-api-access-st6wl\") pod \"rabbitmq-server-0\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.214541 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.285603 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6gzfb"] Nov 28 15:39:29 crc kubenswrapper[4884]: W1128 15:39:29.295747 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode830a879_0bc6_424d_8d35_e3ea1be1590c.slice/crio-ac340c73e751c754a91a9951f83bb7080a0b43912733718fc9ce7b668dc0751b WatchSource:0}: Error finding container ac340c73e751c754a91a9951f83bb7080a0b43912733718fc9ce7b668dc0751b: Status 404 returned error can't find the container with id ac340c73e751c754a91a9951f83bb7080a0b43912733718fc9ce7b668dc0751b Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.450827 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:39:29 crc kubenswrapper[4884]: W1128 15:39:29.501779 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a5d81bd_3b99_4aa6_82dc_2969295dce39.slice/crio-21d21e46f032bd54f79671f22bb01555ab1035acaacad9b170e3ed659f48d1fa WatchSource:0}: Error finding container 21d21e46f032bd54f79671f22bb01555ab1035acaacad9b170e3ed659f48d1fa: Status 404 returned error can't find the container with id 21d21e46f032bd54f79671f22bb01555ab1035acaacad9b170e3ed659f48d1fa Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.576753 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.583256 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.584623 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.585364 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.586098 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.586300 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.586366 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-r99hp" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.586473 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.586592 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.586616 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.715880 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/454fa1ac-19ca-4c44-b0fb-2c30039524a7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.716042 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.716545 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.716629 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.716711 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.716780 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.716918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.717767 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.717836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhz99\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-kube-api-access-rhz99\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.717889 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.717979 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/454fa1ac-19ca-4c44-b0fb-2c30039524a7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822489 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822554 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822575 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhz99\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-kube-api-access-rhz99\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822597 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/454fa1ac-19ca-4c44-b0fb-2c30039524a7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822643 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/454fa1ac-19ca-4c44-b0fb-2c30039524a7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822688 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.822709 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.823933 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.823996 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.824012 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.824126 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") device mount path 
\"/mnt/openstack/pv06\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.825032 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.825571 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.826691 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.826928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.830951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.831229 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.831424 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/454fa1ac-19ca-4c44-b0fb-2c30039524a7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.847803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhz99\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-kube-api-access-rhz99\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.849234 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/454fa1ac-19ca-4c44-b0fb-2c30039524a7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.851318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:29 crc kubenswrapper[4884]: I1128 15:39:29.911277 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:39:30 crc kubenswrapper[4884]: I1128 15:39:30.138401 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" event={"ID":"e830a879-0bc6-424d-8d35-e3ea1be1590c","Type":"ContainerStarted","Data":"ac340c73e751c754a91a9951f83bb7080a0b43912733718fc9ce7b668dc0751b"} Nov 28 15:39:30 crc kubenswrapper[4884]: I1128 15:39:30.142071 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a5d81bd-3b99-4aa6-82dc-2969295dce39","Type":"ContainerStarted","Data":"21d21e46f032bd54f79671f22bb01555ab1035acaacad9b170e3ed659f48d1fa"} Nov 28 15:39:30 crc kubenswrapper[4884]: I1128 15:39:30.405649 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:39:30 crc kubenswrapper[4884]: W1128 15:39:30.437260 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod454fa1ac_19ca_4c44_b0fb_2c30039524a7.slice/crio-138c21efc8405d370794d7b08f6bec398024dcaba5158a73178620e924251f6e WatchSource:0}: Error finding container 138c21efc8405d370794d7b08f6bec398024dcaba5158a73178620e924251f6e: Status 404 returned error can't find the container with id 138c21efc8405d370794d7b08f6bec398024dcaba5158a73178620e924251f6e Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.062446 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.065355 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.068574 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.068822 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.069793 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.070214 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.070849 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9pn94" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.079039 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.082460 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.147819 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kolla-config\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.147863 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjjl4\" (UniqueName: \"kubernetes.io/projected/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kube-api-access-sjjl4\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.147905 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.147946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-default\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.147972 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-secrets\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.148047 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc 
kubenswrapper[4884]: I1128 15:39:31.148074 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.148152 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.148211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.154869 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"454fa1ac-19ca-4c44-b0fb-2c30039524a7","Type":"ContainerStarted","Data":"138c21efc8405d370794d7b08f6bec398024dcaba5158a73178620e924251f6e"} Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250551 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kolla-config\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250643 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjjl4\" (UniqueName: \"kubernetes.io/projected/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kube-api-access-sjjl4\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250681 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250721 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-default\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250743 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-secrets\") pod 
\"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250798 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.250827 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.252069 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.256451 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-default\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.257285 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.257416 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kolla-config\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.259391 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.262644 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-secrets\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.263661 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.267584 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.279848 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjjl4\" (UniqueName: \"kubernetes.io/projected/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kube-api-access-sjjl4\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.286193 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " pod="openstack/openstack-galera-0" Nov 28 15:39:31 crc kubenswrapper[4884]: I1128 15:39:31.408888 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.058534 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 15:39:32 crc kubenswrapper[4884]: W1128 15:39:32.083358 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b1d00ac_0efe_45af_9366_f5d302b86ccb.slice/crio-dbe078766fa53127c485bc5b4e73106eeb3828ea5c293808dcca23903df29a2f WatchSource:0}: Error finding container dbe078766fa53127c485bc5b4e73106eeb3828ea5c293808dcca23903df29a2f: Status 404 returned error can't find the container with id dbe078766fa53127c485bc5b4e73106eeb3828ea5c293808dcca23903df29a2f Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.164219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2b1d00ac-0efe-45af-9366-f5d302b86ccb","Type":"ContainerStarted","Data":"dbe078766fa53127c485bc5b4e73106eeb3828ea5c293808dcca23903df29a2f"} Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.464298 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.465770 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.467623 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-mbjp7" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.468042 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.468289 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.468335 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.472991 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605245 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605332 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605355 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605374 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtvc9\" (UniqueName: \"kubernetes.io/projected/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kube-api-access-jtvc9\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605701 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605788 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605824 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: 
\"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605878 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.605900 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710179 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710226 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710304 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710357 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710435 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: 
\"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710535 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.710588 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtvc9\" (UniqueName: \"kubernetes.io/projected/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kube-api-access-jtvc9\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.712270 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.712556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.715504 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.716975 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.730833 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.733993 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.740152 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc 
kubenswrapper[4884]: I1128 15:39:32.745451 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.747431 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtvc9\" (UniqueName: \"kubernetes.io/projected/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kube-api-access-jtvc9\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.754121 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.755021 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.761401 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.761571 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-c26vv" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.761683 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.780384 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.795286 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.811994 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-memcached-tls-certs\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.812141 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25ht4\" (UniqueName: \"kubernetes.io/projected/959ac7da-0d4b-48f3-84af-2650cd91c143-kube-api-access-25ht4\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.812162 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-combined-ca-bundle\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.812602 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-kolla-config\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " 
pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.812656 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-config-data\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.914424 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25ht4\" (UniqueName: \"kubernetes.io/projected/959ac7da-0d4b-48f3-84af-2650cd91c143-kube-api-access-25ht4\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.914486 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-combined-ca-bundle\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.914513 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-kolla-config\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.914551 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-config-data\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.914617 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-memcached-tls-certs\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.915611 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-kolla-config\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.915883 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-config-data\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.919624 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-memcached-tls-certs\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.922516 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-combined-ca-bundle\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " 
pod="openstack/memcached-0" Nov 28 15:39:32 crc kubenswrapper[4884]: I1128 15:39:32.934441 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25ht4\" (UniqueName: \"kubernetes.io/projected/959ac7da-0d4b-48f3-84af-2650cd91c143-kube-api-access-25ht4\") pod \"memcached-0\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " pod="openstack/memcached-0" Nov 28 15:39:33 crc kubenswrapper[4884]: I1128 15:39:33.090625 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 15:39:33 crc kubenswrapper[4884]: I1128 15:39:33.156526 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 15:39:34 crc kubenswrapper[4884]: I1128 15:39:34.808688 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:39:34 crc kubenswrapper[4884]: I1128 15:39:34.812622 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:39:34 crc kubenswrapper[4884]: I1128 15:39:34.814596 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-5jtnh" Nov 28 15:39:34 crc kubenswrapper[4884]: I1128 15:39:34.830752 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:39:34 crc kubenswrapper[4884]: I1128 15:39:34.956608 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gdjj\" (UniqueName: \"kubernetes.io/projected/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c-kube-api-access-9gdjj\") pod \"kube-state-metrics-0\" (UID: \"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:39:35 crc kubenswrapper[4884]: I1128 15:39:35.057978 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gdjj\" (UniqueName: \"kubernetes.io/projected/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c-kube-api-access-9gdjj\") pod \"kube-state-metrics-0\" (UID: \"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:39:35 crc kubenswrapper[4884]: I1128 15:39:35.082887 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gdjj\" (UniqueName: \"kubernetes.io/projected/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c-kube-api-access-9gdjj\") pod \"kube-state-metrics-0\" (UID: \"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:39:35 crc kubenswrapper[4884]: I1128 15:39:35.132285 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.344569 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tnsft"] Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.346076 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.357959 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.358720 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.360143 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-w49lm" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.376929 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-vm8q9"] Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.383432 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.397469 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnsft"] Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411321 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-log-ovn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411453 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run-ovn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411488 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-ovn-controller-tls-certs\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411508 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-combined-ca-bundle\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411587 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f314d326-c20e-41cb-8fb5-a608d002b170-scripts\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411618 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmqpn\" (UniqueName: \"kubernetes.io/projected/f314d326-c20e-41cb-8fb5-a608d002b170-kube-api-access-wmqpn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.411644 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.422882 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vm8q9"] Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513623 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513681 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xjfb\" (UniqueName: \"kubernetes.io/projected/ce9413d4-1548-44f6-a50d-dcae9284f674-kube-api-access-8xjfb\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513701 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-lib\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513746 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-run\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-etc-ovs\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-log-ovn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513807 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-log\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513834 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run-ovn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 
15:39:38.513854 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-ovn-controller-tls-certs\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513870 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce9413d4-1548-44f6-a50d-dcae9284f674-scripts\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513898 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-combined-ca-bundle\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513921 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f314d326-c20e-41cb-8fb5-a608d002b170-scripts\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.513943 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmqpn\" (UniqueName: \"kubernetes.io/projected/f314d326-c20e-41cb-8fb5-a608d002b170-kube-api-access-wmqpn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.514644 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.514767 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-log-ovn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.514843 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run-ovn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.517671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f314d326-c20e-41cb-8fb5-a608d002b170-scripts\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.524979 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-combined-ca-bundle\") pod 
\"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.526223 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-ovn-controller-tls-certs\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.537350 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmqpn\" (UniqueName: \"kubernetes.io/projected/f314d326-c20e-41cb-8fb5-a608d002b170-kube-api-access-wmqpn\") pod \"ovn-controller-tnsft\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce9413d4-1548-44f6-a50d-dcae9284f674-scripts\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615355 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xjfb\" (UniqueName: \"kubernetes.io/projected/ce9413d4-1548-44f6-a50d-dcae9284f674-kube-api-access-8xjfb\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615381 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-lib\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615435 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-run\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615460 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-etc-ovs\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-log\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.615672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-log\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.617599 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-lib\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.617817 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce9413d4-1548-44f6-a50d-dcae9284f674-scripts\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.617886 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-run\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.618006 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-etc-ovs\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.638878 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xjfb\" (UniqueName: \"kubernetes.io/projected/ce9413d4-1548-44f6-a50d-dcae9284f674-kube-api-access-8xjfb\") pod \"ovn-controller-ovs-vm8q9\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") " pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.688502 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnsft" Nov 28 15:39:38 crc kubenswrapper[4884]: I1128 15:39:38.712978 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.243369 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.245071 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.249882 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.250111 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-2b5st" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.250244 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.250329 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.250457 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.264170 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324464 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324506 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324530 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324583 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-259pm\" (UniqueName: \"kubernetes.io/projected/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-kube-api-access-259pm\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324766 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324842 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-config\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324864 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.324914 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426248 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426311 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-config\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426334 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426366 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426442 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426483 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426507 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.426573 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-259pm\" (UniqueName: \"kubernetes.io/projected/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-kube-api-access-259pm\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 
15:39:39.427004 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.427018 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.427654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-config\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.428526 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.430436 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.430671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.433688 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.450772 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-259pm\" (UniqueName: \"kubernetes.io/projected/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-kube-api-access-259pm\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.470235 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:39 crc kubenswrapper[4884]: I1128 15:39:39.588590 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.421652 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.424971 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.427786 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.427791 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.427903 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-sf4qw" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.428996 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.446303 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.484882 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.484943 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.485007 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-config\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.485039 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.485075 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.485121 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 
15:39:42.485144 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhhv5\" (UniqueName: \"kubernetes.io/projected/4dba6a6f-821c-4897-b88d-5cca9482f4fa-kube-api-access-mhhv5\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.485162 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.586652 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.586736 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.586788 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.586960 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.587500 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhhv5\" (UniqueName: \"kubernetes.io/projected/4dba6a6f-821c-4897-b88d-5cca9482f4fa-kube-api-access-mhhv5\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.587547 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.587474 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.587583 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.587653 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.587708 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-config\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.588703 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-config\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.589169 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.593219 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.595235 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.604520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.607543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.607690 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhhv5\" (UniqueName: \"kubernetes.io/projected/4dba6a6f-821c-4897-b88d-5cca9482f4fa-kube-api-access-mhhv5\") pod \"ovsdbserver-sb-0\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:42 crc kubenswrapper[4884]: I1128 15:39:42.758311 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 15:39:47 crc kubenswrapper[4884]: I1128 15:39:47.552294 4884 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-qzhng container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 15:39:47 crc kubenswrapper[4884]: I1128 15:39:47.552886 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-qzhng" podUID="88c4733c-f3d7-4718-a865-bd4b9b510fbe" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:39:50 crc kubenswrapper[4884]: E1128 15:39:50.573448 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 15:39:50 crc kubenswrapper[4884]: E1128 15:39:50.573996 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-st6wl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(3a5d81bd-3b99-4aa6-82dc-2969295dce39): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:50 crc kubenswrapper[4884]: E1128 15:39:50.575698 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" Nov 28 15:39:50 crc kubenswrapper[4884]: E1128 15:39:50.581447 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 28 15:39:50 crc kubenswrapper[4884]: E1128 15:39:50.581613 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rhz99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(454fa1ac-19ca-4c44-b0fb-2c30039524a7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:50 crc kubenswrapper[4884]: E1128 15:39:50.582688 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" Nov 28 15:39:51 crc kubenswrapper[4884]: E1128 15:39:51.329367 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" Nov 28 15:39:51 crc kubenswrapper[4884]: E1128 15:39:51.329492 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" Nov 28 15:39:52 crc kubenswrapper[4884]: E1128 15:39:52.521426 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 28 15:39:52 crc kubenswrapper[4884]: E1128 15:39:52.521872 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sjjl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(2b1d00ac-0efe-45af-9366-f5d302b86ccb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:52 crc kubenswrapper[4884]: E1128 15:39:52.523122 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.226958 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.228153 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d 
--hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wvjqb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-g2cpk_openstack(6f42f94f-f913-4b71-ade4-7c228064a21e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.233402 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" podUID="6f42f94f-f913-4b71-ade4-7c228064a21e" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.243132 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.243315 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rbxfk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-8dmvk_openstack(156eac68-0e44-450f-bdcd-ba13600231f6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.244487 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" podUID="156eac68-0e44-450f-bdcd-ba13600231f6" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.244645 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.244772 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hm66q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-5r2db_openstack(9de8b98b-fa0a-43a6-aad4-de8f12035d7a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.245950 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" podUID="9de8b98b-fa0a-43a6-aad4-de8f12035d7a" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.253287 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.253418 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7c2br,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-6gzfb_openstack(e830a879-0bc6-424d-8d35-e3ea1be1590c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.254627 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" podUID="e830a879-0bc6-424d-8d35-e3ea1be1590c" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.347714 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" podUID="e830a879-0bc6-424d-8d35-e3ea1be1590c" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.349185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" podUID="156eac68-0e44-450f-bdcd-ba13600231f6" Nov 28 15:39:53 crc kubenswrapper[4884]: E1128 15:39:53.349249 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.802885 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.862126 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:39:53 crc kubenswrapper[4884]: W1128 15:39:53.874645 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfe29bb1_1749_44e8_8a9a_a44a8e85e95c.slice/crio-35aaa2c4a815d508f202e3cacf560818d17aa894afd5e4ee436dc53cacf1b186 WatchSource:0}: Error finding container 35aaa2c4a815d508f202e3cacf560818d17aa894afd5e4ee436dc53cacf1b186: Status 404 returned error can't find the container with id 35aaa2c4a815d508f202e3cacf560818d17aa894afd5e4ee436dc53cacf1b186 Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.875583 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 15:39:53 crc kubenswrapper[4884]: W1128 15:39:53.884814 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod959ac7da_0d4b_48f3_84af_2650cd91c143.slice/crio-b34e7186a5d0c35de269d77b36b38a5d3e968f3fd191a37b932c868decdb47b0 WatchSource:0}: Error finding container b34e7186a5d0c35de269d77b36b38a5d3e968f3fd191a37b932c868decdb47b0: Status 404 returned error can't find the container with id b34e7186a5d0c35de269d77b36b38a5d3e968f3fd191a37b932c868decdb47b0 Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.885742 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.921338 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f42f94f-f913-4b71-ade4-7c228064a21e-config\") pod \"6f42f94f-f913-4b71-ade4-7c228064a21e\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.921983 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f42f94f-f913-4b71-ade4-7c228064a21e-config" (OuterVolumeSpecName: "config") pod "6f42f94f-f913-4b71-ade4-7c228064a21e" (UID: "6f42f94f-f913-4b71-ade4-7c228064a21e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.922251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvjqb\" (UniqueName: \"kubernetes.io/projected/6f42f94f-f913-4b71-ade4-7c228064a21e-kube-api-access-wvjqb\") pod \"6f42f94f-f913-4b71-ade4-7c228064a21e\" (UID: \"6f42f94f-f913-4b71-ade4-7c228064a21e\") " Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.923449 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f42f94f-f913-4b71-ade4-7c228064a21e-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.927841 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f42f94f-f913-4b71-ade4-7c228064a21e-kube-api-access-wvjqb" (OuterVolumeSpecName: "kube-api-access-wvjqb") pod "6f42f94f-f913-4b71-ade4-7c228064a21e" (UID: "6f42f94f-f913-4b71-ade4-7c228064a21e"). InnerVolumeSpecName "kube-api-access-wvjqb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4884]: I1128 15:39:53.960387 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.025268 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm66q\" (UniqueName: \"kubernetes.io/projected/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-kube-api-access-hm66q\") pod \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.025328 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-dns-svc\") pod \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.025359 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-config\") pod \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\" (UID: \"9de8b98b-fa0a-43a6-aad4-de8f12035d7a\") " Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.025805 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvjqb\" (UniqueName: \"kubernetes.io/projected/6f42f94f-f913-4b71-ade4-7c228064a21e-kube-api-access-wvjqb\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.025998 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9de8b98b-fa0a-43a6-aad4-de8f12035d7a" (UID: "9de8b98b-fa0a-43a6-aad4-de8f12035d7a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.026360 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-config" (OuterVolumeSpecName: "config") pod "9de8b98b-fa0a-43a6-aad4-de8f12035d7a" (UID: "9de8b98b-fa0a-43a6-aad4-de8f12035d7a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.030379 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-kube-api-access-hm66q" (OuterVolumeSpecName: "kube-api-access-hm66q") pod "9de8b98b-fa0a-43a6-aad4-de8f12035d7a" (UID: "9de8b98b-fa0a-43a6-aad4-de8f12035d7a"). InnerVolumeSpecName "kube-api-access-hm66q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.051382 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:39:54 crc kubenswrapper[4884]: W1128 15:39:54.061190 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf314d326_c20e_41cb_8fb5_a608d002b170.slice/crio-3ac63c313a3776367e5a7d8ec66a5be09ebaed1a359805edbe89bf1de42ef881 WatchSource:0}: Error finding container 3ac63c313a3776367e5a7d8ec66a5be09ebaed1a359805edbe89bf1de42ef881: Status 404 returned error can't find the container with id 3ac63c313a3776367e5a7d8ec66a5be09ebaed1a359805edbe89bf1de42ef881 Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.062818 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnsft"] Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.116913 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:39:54 crc kubenswrapper[4884]: W1128 15:39:54.123446 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc33f5fde_1bf4_406f_ad32_a1ed3ee3db9f.slice/crio-8bf1c17a1dcac57300b9682afa060d2b53f593ebbbbad04d7a0960a3ff2b3121 WatchSource:0}: Error finding container 8bf1c17a1dcac57300b9682afa060d2b53f593ebbbbad04d7a0960a3ff2b3121: Status 404 returned error can't find the container with id 8bf1c17a1dcac57300b9682afa060d2b53f593ebbbbad04d7a0960a3ff2b3121 Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.126804 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm66q\" (UniqueName: \"kubernetes.io/projected/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-kube-api-access-hm66q\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.126831 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.126842 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9de8b98b-fa0a-43a6-aad4-de8f12035d7a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.220472 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-vm8q9"] Nov 28 15:39:54 crc kubenswrapper[4884]: W1128 15:39:54.221843 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce9413d4_1548_44f6_a50d_dcae9284f674.slice/crio-42dd396c596ca2375cb58db1a0cae311ef33b9931da3c9fa6a0d428e8aff4c66 WatchSource:0}: Error finding container 42dd396c596ca2375cb58db1a0cae311ef33b9931da3c9fa6a0d428e8aff4c66: Status 404 returned error can't find the container with id 42dd396c596ca2375cb58db1a0cae311ef33b9931da3c9fa6a0d428e8aff4c66 Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.353179 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"959ac7da-0d4b-48f3-84af-2650cd91c143","Type":"ContainerStarted","Data":"b34e7186a5d0c35de269d77b36b38a5d3e968f3fd191a37b932c868decdb47b0"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.354584 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" 
event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerStarted","Data":"42dd396c596ca2375cb58db1a0cae311ef33b9931da3c9fa6a0d428e8aff4c66"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.356040 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft" event={"ID":"f314d326-c20e-41cb-8fb5-a608d002b170","Type":"ContainerStarted","Data":"3ac63c313a3776367e5a7d8ec66a5be09ebaed1a359805edbe89bf1de42ef881"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.357529 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.357517 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-5r2db" event={"ID":"9de8b98b-fa0a-43a6-aad4-de8f12035d7a","Type":"ContainerDied","Data":"f41bec43da677e3579431109434694b3e9282306955dd34b65631fcac839f538"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.358953 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4c2dcae8-7c76-46e9-90d4-afd8af5f474a","Type":"ContainerStarted","Data":"044fe30c427646beca35b46432244b97d3f929afc22157e44f74964314cf5593"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.360450 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dba6a6f-821c-4897-b88d-5cca9482f4fa","Type":"ContainerStarted","Data":"63e656df8f1d12ce740664a60092cbcac3a306b5dd1672c6f364faf74c96c8d0"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.361600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f","Type":"ContainerStarted","Data":"8bf1c17a1dcac57300b9682afa060d2b53f593ebbbbad04d7a0960a3ff2b3121"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.362668 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c","Type":"ContainerStarted","Data":"35aaa2c4a815d508f202e3cacf560818d17aa894afd5e4ee436dc53cacf1b186"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.364020 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" event={"ID":"6f42f94f-f913-4b71-ade4-7c228064a21e","Type":"ContainerDied","Data":"0d9ab7f5713862e22f17e2af1ab0b483dd07726850f1e72dc8905b96cd34a8ce"} Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.364040 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-g2cpk" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.432921 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5r2db"] Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.443412 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-5r2db"] Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.475164 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g2cpk"] Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.485075 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-g2cpk"] Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.699009 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f42f94f-f913-4b71-ade4-7c228064a21e" path="/var/lib/kubelet/pods/6f42f94f-f913-4b71-ade4-7c228064a21e/volumes" Nov 28 15:39:54 crc kubenswrapper[4884]: I1128 15:39:54.699385 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9de8b98b-fa0a-43a6-aad4-de8f12035d7a" path="/var/lib/kubelet/pods/9de8b98b-fa0a-43a6-aad4-de8f12035d7a/volumes" Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.400481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c","Type":"ContainerStarted","Data":"33d3a0261db185faf83d34a47fdd9b6a51b9036f89e24846369b69f505429032"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.401028 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.404309 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4c2dcae8-7c76-46e9-90d4-afd8af5f474a","Type":"ContainerStarted","Data":"702d39f50ebfee6a7674542892d8e03d46e3c07866fffdb7282582d852c4e020"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.406299 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"959ac7da-0d4b-48f3-84af-2650cd91c143","Type":"ContainerStarted","Data":"42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.406360 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.408335 4884 generic.go:334] "Generic (PLEG): container finished" podID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerID="f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5" exitCode=0 Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.408550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerDied","Data":"f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.414726 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dba6a6f-821c-4897-b88d-5cca9482f4fa","Type":"ContainerStarted","Data":"2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.421304 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft" 
event={"ID":"f314d326-c20e-41cb-8fb5-a608d002b170","Type":"ContainerStarted","Data":"526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.421483 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-tnsft" Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.422691 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=21.13690267 podStartE2EDuration="25.422665628s" podCreationTimestamp="2025-11-28 15:39:34 +0000 UTC" firstStartedPulling="2025-11-28 15:39:53.881567067 +0000 UTC m=+1233.444350868" lastFinishedPulling="2025-11-28 15:39:58.167330025 +0000 UTC m=+1237.730113826" observedRunningTime="2025-11-28 15:39:59.414419947 +0000 UTC m=+1238.977203748" watchObservedRunningTime="2025-11-28 15:39:59.422665628 +0000 UTC m=+1238.985449429" Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.433407 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f","Type":"ContainerStarted","Data":"ae49153625b5a710cac5ba1fd2d4e6f0fc8441454ec160836b76057c9cc2c22e"} Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.493314 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=23.814252243 podStartE2EDuration="27.493288659s" podCreationTimestamp="2025-11-28 15:39:32 +0000 UTC" firstStartedPulling="2025-11-28 15:39:53.88858878 +0000 UTC m=+1233.451372581" lastFinishedPulling="2025-11-28 15:39:57.567625196 +0000 UTC m=+1237.130408997" observedRunningTime="2025-11-28 15:39:59.476663344 +0000 UTC m=+1239.039447165" watchObservedRunningTime="2025-11-28 15:39:59.493288659 +0000 UTC m=+1239.056072460" Nov 28 15:39:59 crc kubenswrapper[4884]: I1128 15:39:59.503175 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-tnsft" podStartSLOduration=17.474548932 podStartE2EDuration="21.50315702s" podCreationTimestamp="2025-11-28 15:39:38 +0000 UTC" firstStartedPulling="2025-11-28 15:39:54.066981058 +0000 UTC m=+1233.629764859" lastFinishedPulling="2025-11-28 15:39:58.095589146 +0000 UTC m=+1237.658372947" observedRunningTime="2025-11-28 15:39:59.494629062 +0000 UTC m=+1239.057412873" watchObservedRunningTime="2025-11-28 15:39:59.50315702 +0000 UTC m=+1239.065940821" Nov 28 15:40:00 crc kubenswrapper[4884]: I1128 15:40:00.444571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerStarted","Data":"76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f"} Nov 28 15:40:00 crc kubenswrapper[4884]: I1128 15:40:00.445355 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerStarted","Data":"88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4"} Nov 28 15:40:00 crc kubenswrapper[4884]: I1128 15:40:00.471922 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-vm8q9" podStartSLOduration=18.963751895 podStartE2EDuration="22.471902376s" podCreationTimestamp="2025-11-28 15:39:38 +0000 UTC" firstStartedPulling="2025-11-28 15:39:54.224424856 +0000 UTC m=+1233.787208657" lastFinishedPulling="2025-11-28 15:39:57.732575337 +0000 UTC m=+1237.295359138" 
observedRunningTime="2025-11-28 15:40:00.462550788 +0000 UTC m=+1240.025334589" watchObservedRunningTime="2025-11-28 15:40:00.471902376 +0000 UTC m=+1240.034686177" Nov 28 15:40:01 crc kubenswrapper[4884]: I1128 15:40:01.454648 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:40:01 crc kubenswrapper[4884]: I1128 15:40:01.454860 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.468208 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dba6a6f-821c-4897-b88d-5cca9482f4fa","Type":"ContainerStarted","Data":"a5c169886253947fb2ce50aaadfd10698791496c5feb2c5fc8da5851f6053e65"} Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.471787 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f","Type":"ContainerStarted","Data":"3b941bb6a3a7e306dfa8d1b96625c36d59ceacd6279193eca68547bae5466ce0"} Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.474373 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerID="702d39f50ebfee6a7674542892d8e03d46e3c07866fffdb7282582d852c4e020" exitCode=0 Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.474429 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4c2dcae8-7c76-46e9-90d4-afd8af5f474a","Type":"ContainerDied","Data":"702d39f50ebfee6a7674542892d8e03d46e3c07866fffdb7282582d852c4e020"} Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.508751 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=14.050182797 podStartE2EDuration="21.50872553s" podCreationTimestamp="2025-11-28 15:39:41 +0000 UTC" firstStartedPulling="2025-11-28 15:39:53.974520214 +0000 UTC m=+1233.537304015" lastFinishedPulling="2025-11-28 15:40:01.433062947 +0000 UTC m=+1240.995846748" observedRunningTime="2025-11-28 15:40:02.495534479 +0000 UTC m=+1242.058318290" watchObservedRunningTime="2025-11-28 15:40:02.50872553 +0000 UTC m=+1242.071509341" Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.549630 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=17.259496039 podStartE2EDuration="24.549610726s" podCreationTimestamp="2025-11-28 15:39:38 +0000 UTC" firstStartedPulling="2025-11-28 15:39:54.127215566 +0000 UTC m=+1233.689999367" lastFinishedPulling="2025-11-28 15:40:01.417330263 +0000 UTC m=+1240.980114054" observedRunningTime="2025-11-28 15:40:02.530215563 +0000 UTC m=+1242.092999374" watchObservedRunningTime="2025-11-28 15:40:02.549610726 +0000 UTC m=+1242.112394547" Nov 28 15:40:02 crc kubenswrapper[4884]: I1128 15:40:02.758849 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.162240 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.483951 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4c2dcae8-7c76-46e9-90d4-afd8af5f474a","Type":"ContainerStarted","Data":"73bf92758339020175aea8e11f436696e330b710e4144aaee2fa67b45a5f9276"} 
Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.507595 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=29.826755417 podStartE2EDuration="32.507575929s" podCreationTimestamp="2025-11-28 15:39:31 +0000 UTC" firstStartedPulling="2025-11-28 15:39:54.044647233 +0000 UTC m=+1233.607431034" lastFinishedPulling="2025-11-28 15:39:56.725467745 +0000 UTC m=+1236.288251546" observedRunningTime="2025-11-28 15:40:03.50552392 +0000 UTC m=+1243.068307721" watchObservedRunningTime="2025-11-28 15:40:03.507575929 +0000 UTC m=+1243.070359740" Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.590320 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.633289 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.758849 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:03 crc kubenswrapper[4884]: I1128 15:40:03.800313 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.495149 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.549278 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.549588 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.812806 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"] Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.840021 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bkmzb"] Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.841632 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.843773 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.860517 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bkmzb"] Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.975557 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-sndnk"] Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.987439 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:04 crc kubenswrapper[4884]: I1128 15:40:04.998283 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.014671 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-sndnk"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.025639 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.025752 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26n2x\" (UniqueName: \"kubernetes.io/projected/699fdb63-e923-4029-86d5-e271fe0cf8eb-kube-api-access-26n2x\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.025869 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-config\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.025948 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.088883 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6gzfb"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.103886 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.105476 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.109535 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.109873 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-pbfxv" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.110013 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.111559 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127455 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-config\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127538 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69cf456a-4993-4bd5-b745-5d73a65b6b91-config\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127565 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44rdp\" (UniqueName: \"kubernetes.io/projected/69cf456a-4993-4bd5-b745-5d73a65b6b91-kube-api-access-44rdp\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127598 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovn-rundir\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127631 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovs-rundir\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127686 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26n2x\" (UniqueName: \"kubernetes.io/projected/699fdb63-e923-4029-86d5-e271fe0cf8eb-kube-api-access-26n2x\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127717 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.127734 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-combined-ca-bundle\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.134213 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.139657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-config\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.140368 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.149703 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-74d7m"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.150937 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.163186 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.172138 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.184115 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-74d7m"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.193120 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.220882 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bkmzb"] Nov 28 15:40:05 crc kubenswrapper[4884]: E1128 15:40:05.221464 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-26n2x], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" podUID="699fdb63-e923-4029-86d5-e271fe0cf8eb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229660 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6cdac902-b0e5-4f41-923c-07241207d730-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229713 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69cf456a-4993-4bd5-b745-5d73a65b6b91-config\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229746 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44rdp\" (UniqueName: \"kubernetes.io/projected/69cf456a-4993-4bd5-b745-5d73a65b6b91-kube-api-access-44rdp\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229771 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-scripts\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229792 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovn-rundir\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229828 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovs-rundir\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229849 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-config\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229863 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7x96\" (UniqueName: \"kubernetes.io/projected/6cdac902-b0e5-4f41-923c-07241207d730-kube-api-access-v7x96\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229881 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229909 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229934 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-combined-ca-bundle\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.229981 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.232224 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69cf456a-4993-4bd5-b745-5d73a65b6b91-config\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.232726 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovn-rundir\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.232775 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: 
\"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovs-rundir\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.265991 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26n2x\" (UniqueName: \"kubernetes.io/projected/699fdb63-e923-4029-86d5-e271fe0cf8eb-kube-api-access-26n2x\") pod \"dnsmasq-dns-5bf47b49b7-bkmzb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.266755 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.268838 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-combined-ca-bundle\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.281686 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-k2vzb"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.285936 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44rdp\" (UniqueName: \"kubernetes.io/projected/69cf456a-4993-4bd5-b745-5d73a65b6b91-kube-api-access-44rdp\") pod \"ovn-controller-metrics-sndnk\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.297240 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-k2vzb"] Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.297363 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331078 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-config\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331136 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7x96\" (UniqueName: \"kubernetes.io/projected/6cdac902-b0e5-4f41-923c-07241207d730-kube-api-access-v7x96\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331167 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331194 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331235 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-dns-svc\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331255 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph65v\" (UniqueName: \"kubernetes.io/projected/84790bab-dc32-4066-a074-aa087eb1941f-kube-api-access-ph65v\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331307 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331327 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-config\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 
15:40:05.331350 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6cdac902-b0e5-4f41-923c-07241207d730-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331394 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.331426 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-scripts\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.332482 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-scripts\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.333052 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-config\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.337289 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.338021 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-sndnk"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.339850 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6cdac902-b0e5-4f41-923c-07241207d730-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.341779 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.344506 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.370724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7x96\" (UniqueName: \"kubernetes.io/projected/6cdac902-b0e5-4f41-923c-07241207d730-kube-api-access-v7x96\") pod \"ovn-northd-0\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.432804 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433049 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-dns-svc\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433102 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph65v\" (UniqueName: \"kubernetes.io/projected/84790bab-dc32-4066-a074-aa087eb1941f-kube-api-access-ph65v\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433120 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433137 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-config\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433177 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmg79\" (UniqueName: \"kubernetes.io/projected/19503544-c3b2-4d1f-9abb-8af9baac851a-kube-api-access-zmg79\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433209 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433230 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-config\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433251 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.433926 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.435856 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-dns-svc\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.436171 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-config\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.436626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.439374 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.491707 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.509737 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.518219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-8dmvk" event={"ID":"156eac68-0e44-450f-bdcd-ba13600231f6","Type":"ContainerDied","Data":"3a33e6b6ebc476adef9ffc94d91c1404b3931aa4c269d2600140827ef55f2c08"}
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.518306 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.528254 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bkmzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.534806 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-dns-svc\") pod \"156eac68-0e44-450f-bdcd-ba13600231f6\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.534860 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbxfk\" (UniqueName: \"kubernetes.io/projected/156eac68-0e44-450f-bdcd-ba13600231f6-kube-api-access-rbxfk\") pod \"156eac68-0e44-450f-bdcd-ba13600231f6\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.535057 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-config\") pod \"156eac68-0e44-450f-bdcd-ba13600231f6\" (UID: \"156eac68-0e44-450f-bdcd-ba13600231f6\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.535474 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "156eac68-0e44-450f-bdcd-ba13600231f6" (UID: "156eac68-0e44-450f-bdcd-ba13600231f6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.535543 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-config" (OuterVolumeSpecName: "config") pod "156eac68-0e44-450f-bdcd-ba13600231f6" (UID: "156eac68-0e44-450f-bdcd-ba13600231f6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.535727 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.536459 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.536533 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmg79\" (UniqueName: \"kubernetes.io/projected/19503544-c3b2-4d1f-9abb-8af9baac851a-kube-api-access-zmg79\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.536626 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.537460 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-config\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.537590 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.537706 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.537722 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/156eac68-0e44-450f-bdcd-ba13600231f6-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.537399 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.538546 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.538864 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-config\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.546528 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph65v\" (UniqueName: \"kubernetes.io/projected/84790bab-dc32-4066-a074-aa087eb1941f-kube-api-access-ph65v\") pod \"dnsmasq-dns-8554648995-74d7m\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") " pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.550189 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/156eac68-0e44-450f-bdcd-ba13600231f6-kube-api-access-rbxfk" (OuterVolumeSpecName: "kube-api-access-rbxfk") pod "156eac68-0e44-450f-bdcd-ba13600231f6" (UID: "156eac68-0e44-450f-bdcd-ba13600231f6"). InnerVolumeSpecName "kube-api-access-rbxfk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.556960 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmg79\" (UniqueName: \"kubernetes.io/projected/19503544-c3b2-4d1f-9abb-8af9baac851a-kube-api-access-zmg79\") pod \"dnsmasq-dns-b8fbc5445-k2vzb\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.579643 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.642097 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26n2x\" (UniqueName: \"kubernetes.io/projected/699fdb63-e923-4029-86d5-e271fe0cf8eb-kube-api-access-26n2x\") pod \"699fdb63-e923-4029-86d5-e271fe0cf8eb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.642326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-config\") pod \"699fdb63-e923-4029-86d5-e271fe0cf8eb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.642445 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-dns-svc\") pod \"699fdb63-e923-4029-86d5-e271fe0cf8eb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.642493 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-ovsdbserver-nb\") pod \"699fdb63-e923-4029-86d5-e271fe0cf8eb\" (UID: \"699fdb63-e923-4029-86d5-e271fe0cf8eb\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.643076 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbxfk\" (UniqueName: \"kubernetes.io/projected/156eac68-0e44-450f-bdcd-ba13600231f6-kube-api-access-rbxfk\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.645437 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "699fdb63-e923-4029-86d5-e271fe0cf8eb" (UID: "699fdb63-e923-4029-86d5-e271fe0cf8eb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.645537 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-config" (OuterVolumeSpecName: "config") pod "699fdb63-e923-4029-86d5-e271fe0cf8eb" (UID: "699fdb63-e923-4029-86d5-e271fe0cf8eb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.648248 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "699fdb63-e923-4029-86d5-e271fe0cf8eb" (UID: "699fdb63-e923-4029-86d5-e271fe0cf8eb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.736485 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.745756 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.745780 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.745818 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/699fdb63-e923-4029-86d5-e271fe0cf8eb-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.786823 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/699fdb63-e923-4029-86d5-e271fe0cf8eb-kube-api-access-26n2x" (OuterVolumeSpecName: "kube-api-access-26n2x") pod "699fdb63-e923-4029-86d5-e271fe0cf8eb" (UID: "699fdb63-e923-4029-86d5-e271fe0cf8eb"). InnerVolumeSpecName "kube-api-access-26n2x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.810473 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb"
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.848477 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26n2x\" (UniqueName: \"kubernetes.io/projected/699fdb63-e923-4029-86d5-e271fe0cf8eb-kube-api-access-26n2x\") on node \"crc\" DevicePath \"\""
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.897578 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"]
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.906859 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"]
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.953711 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-config\") pod \"e830a879-0bc6-424d-8d35-e3ea1be1590c\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.953853 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-dns-svc\") pod \"e830a879-0bc6-424d-8d35-e3ea1be1590c\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.953910 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c2br\" (UniqueName: \"kubernetes.io/projected/e830a879-0bc6-424d-8d35-e3ea1be1590c-kube-api-access-7c2br\") pod \"e830a879-0bc6-424d-8d35-e3ea1be1590c\" (UID: \"e830a879-0bc6-424d-8d35-e3ea1be1590c\") "
Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.955101 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-config" (OuterVolumeSpecName: "config") pod "e830a879-0bc6-424d-8d35-e3ea1be1590c" (UID: "e830a879-0bc6-424d-8d35-e3ea1be1590c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
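[editor's note] The teardown above (ContainerDied, then UnmountVolume/TearDown, then "Volume detached", then SyncLoop DELETE and REMOVE) is easier to audit once the entries are split into fields: a syslog prefix, a klog header (severity+date, time, PID, file:line), then a quoted message with key=value pairs. A throwaway parser for pulling those fields out of this file; the regular expression is my own, written for this log's format, not part of any kubelet tooling:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Matches the klog part of an entry; unanchored, so it also works on
    // lines that still carry the "Nov 28 ... kubenswrapper[4884]:" prefix.
    var entry = regexp.MustCompile(
        `([IWE])(\d{4} \d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+)\s+([\w.]+:\d+)\]\s+(.*)`)

    func main() {
        line := `I1128 15:40:05.897578 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-8dmvk"]`
        m := entry.FindStringSubmatch(line)
        if m == nil {
            fmt.Println("no match")
            return
        }
        fmt.Printf("level=%s time=%s pid=%s src=%s msg=%s\n", m[1], m[2], m[3], m[4], m[5])
    }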
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.955332 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e830a879-0bc6-424d-8d35-e3ea1be1590c" (UID: "e830a879-0bc6-424d-8d35-e3ea1be1590c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.965228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e830a879-0bc6-424d-8d35-e3ea1be1590c-kube-api-access-7c2br" (OuterVolumeSpecName: "kube-api-access-7c2br") pod "e830a879-0bc6-424d-8d35-e3ea1be1590c" (UID: "e830a879-0bc6-424d-8d35-e3ea1be1590c"). InnerVolumeSpecName "kube-api-access-7c2br". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:05 crc kubenswrapper[4884]: I1128 15:40:05.970425 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-sndnk"] Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.057953 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.057989 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c2br\" (UniqueName: \"kubernetes.io/projected/e830a879-0bc6-424d-8d35-e3ea1be1590c-kube-api-access-7c2br\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.057999 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e830a879-0bc6-424d-8d35-e3ea1be1590c-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.269074 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:40:06 crc kubenswrapper[4884]: W1128 15:40:06.277267 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cdac902_b0e5_4f41_923c_07241207d730.slice/crio-0894a630a352c74e7387b4a09b7a1ede361348647619265695937aa06d289b29 WatchSource:0}: Error finding container 0894a630a352c74e7387b4a09b7a1ede361348647619265695937aa06d289b29: Status 404 returned error can't find the container with id 0894a630a352c74e7387b4a09b7a1ede361348647619265695937aa06d289b29 Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.320561 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.343764 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 15:40:06 crc kubenswrapper[4884]: W1128 15:40:06.349392 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84790bab_dc32_4066_a074_aa087eb1941f.slice/crio-059ca57656a624aa35f1601035d8b34fe0c576ddca40b32d4929a569a11ddf7b WatchSource:0}: Error finding container 059ca57656a624aa35f1601035d8b34fe0c576ddca40b32d4929a569a11ddf7b: Status 404 returned error can't find the container with id 059ca57656a624aa35f1601035d8b34fe0c576ddca40b32d4929a569a11ddf7b Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.350913 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-zgtbc" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.351562 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.352516 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.356193 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.372598 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.390838 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-74d7m"] Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.406969 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-k2vzb"] Nov 28 15:40:06 crc kubenswrapper[4884]: W1128 15:40:06.409466 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19503544_c3b2_4d1f_9abb_8af9baac851a.slice/crio-78cd6d3854d009813ddfa246696c33fc4e2f12f6634d79681cb575a27de594c2 WatchSource:0}: Error finding container 78cd6d3854d009813ddfa246696c33fc4e2f12f6634d79681cb575a27de594c2: Status 404 returned error can't find the container with id 78cd6d3854d009813ddfa246696c33fc4e2f12f6634d79681cb575a27de594c2 Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.466926 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-cache\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.467024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.467067 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-lock\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.467217 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.467250 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs4vn\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-kube-api-access-rs4vn\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.518290 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6cdac902-b0e5-4f41-923c-07241207d730","Type":"ContainerStarted","Data":"0894a630a352c74e7387b4a09b7a1ede361348647619265695937aa06d289b29"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.520038 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2b1d00ac-0efe-45af-9366-f5d302b86ccb","Type":"ContainerStarted","Data":"e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.521404 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a5d81bd-3b99-4aa6-82dc-2969295dce39","Type":"ContainerStarted","Data":"261aa2bf3aa707f9ac09db8380136376399868fb0e2b7fcc555077a346a02c2f"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.523081 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sndnk" event={"ID":"69cf456a-4993-4bd5-b745-5d73a65b6b91","Type":"ContainerStarted","Data":"59f590909455eb43b2d839c335076a9aae027b6d3bbbbda17ffef2e52abe1e46"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.523188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sndnk" event={"ID":"69cf456a-4993-4bd5-b745-5d73a65b6b91","Type":"ContainerStarted","Data":"4b618815a97880b423cd9473468febd7a7c6d7ecfc67c7ddbe2a95872853999f"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.525816 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-74d7m" event={"ID":"84790bab-dc32-4066-a074-aa087eb1941f","Type":"ContainerStarted","Data":"059ca57656a624aa35f1601035d8b34fe0c576ddca40b32d4929a569a11ddf7b"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.527840 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" event={"ID":"e830a879-0bc6-424d-8d35-e3ea1be1590c","Type":"ContainerDied","Data":"ac340c73e751c754a91a9951f83bb7080a0b43912733718fc9ce7b668dc0751b"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.528009 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6gzfb" Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.537241 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" event={"ID":"19503544-c3b2-4d1f-9abb-8af9baac851a","Type":"ContainerStarted","Data":"78cd6d3854d009813ddfa246696c33fc4e2f12f6634d79681cb575a27de594c2"} Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.537264 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.564451 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-sndnk" podStartSLOduration=2.5644329900000002 podStartE2EDuration="2.56443299s" podCreationTimestamp="2025-11-28 15:40:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:40:06.563683711 +0000 UTC m=+1246.126467522" watchObservedRunningTime="2025-11-28 15:40:06.56443299 +0000 UTC m=+1246.127216791"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.572213 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-cache\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.572496 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.572524 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-lock\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.572608 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.572624 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs4vn\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-kube-api-access-rs4vn\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: E1128 15:40:06.572733 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 15:40:06 crc kubenswrapper[4884]: E1128 15:40:06.572761 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 28 15:40:06 crc kubenswrapper[4884]: E1128 15:40:06.572810 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift podName:772cd3d6-daa4-4494-9ce8-6182e011fbc4 nodeName:}" failed. No retries permitted until 2025-11-28 15:40:07.072790644 +0000 UTC m=+1246.635574445 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift") pod "swift-storage-0" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4") : configmap "swift-ring-files" not found
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.573233 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-lock\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.573263 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-cache\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.573569 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.613261 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs4vn\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-kube-api-access-rs4vn\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.616070 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.657038 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-dn2ww"]
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.658343 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dn2ww"
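[editor's note] The MountVolume.SetUp failure above is worth pausing on: swift-storage-0's etc-swift volume is a projected volume sourcing the swift-ring-files ConfigMap, which does not exist yet; the swift-ring-rebalance-dn2ww job that produces it is only being scheduled in the same breath. The pod spec itself is not part of this log, so the shape below is an assumption consistent with the messages, written with the k8s.io/api types; note the Optional field, which, had it been set, would have let SetUp succeed while the ConfigMap was still missing:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        optional := false // the failures above imply the source is required
        vol := corev1.Volume{
            Name: "etc-swift",
            VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{
                    Sources: []corev1.VolumeProjection{{
                        ConfigMap: &corev1.ConfigMapProjection{
                            LocalObjectReference: corev1.LocalObjectReference{
                                Name: "swift-ring-files",
                            },
                            // Optional: &optional -- flipping this to true
                            // tolerates a missing ConfigMap at mount time.
                            Optional: &optional,
                        },
                    }},
                },
            },
        }
        fmt.Printf("%+v\n", vol)
    }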
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.661364 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.661582 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.661780 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.666932 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-dn2ww"]
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.707959 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="156eac68-0e44-450f-bdcd-ba13600231f6" path="/var/lib/kubelet/pods/156eac68-0e44-450f-bdcd-ba13600231f6/volumes"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.708337 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6gzfb"]
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.716193 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6gzfb"]
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.740150 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bkmzb"]
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.747027 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bkmzb"]
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776032 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-dispersionconf\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776114 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a24664a4-3143-4fb4-b050-30b81e52a1f3-etc-swift\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776157 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjhl\" (UniqueName: \"kubernetes.io/projected/a24664a4-3143-4fb4-b050-30b81e52a1f3-kube-api-access-cnjhl\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776222 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-ring-data-devices\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776500 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-scripts\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776717 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-swiftconf\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.776837 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-combined-ca-bundle\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.877997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a24664a4-3143-4fb4-b050-30b81e52a1f3-etc-swift\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.878406 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a24664a4-3143-4fb4-b050-30b81e52a1f3-etc-swift\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.878485 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjhl\" (UniqueName: \"kubernetes.io/projected/a24664a4-3143-4fb4-b050-30b81e52a1f3-kube-api-access-cnjhl\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.879787 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-ring-data-devices\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.880370 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-ring-data-devices\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.880416 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-scripts\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.880461 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-swiftconf\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.880492 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-combined-ca-bundle\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.880520 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-dispersionconf\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.883344 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-dispersionconf\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.883601 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-swiftconf\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.883846 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-scripts\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.895648 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-combined-ca-bundle\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.900585 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjhl\" (UniqueName: \"kubernetes.io/projected/a24664a4-3143-4fb4-b050-30b81e52a1f3-kube-api-access-cnjhl\") pod \"swift-ring-rebalance-dn2ww\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:06 crc kubenswrapper[4884]: I1128 15:40:06.981937 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-dn2ww"
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.086286 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:07 crc kubenswrapper[4884]: E1128 15:40:07.086473 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 15:40:07 crc kubenswrapper[4884]: E1128 15:40:07.086493 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 28 15:40:07 crc kubenswrapper[4884]: E1128 15:40:07.086548 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift podName:772cd3d6-daa4-4494-9ce8-6182e011fbc4 nodeName:}" failed. No retries permitted until 2025-11-28 15:40:08.086530466 +0000 UTC m=+1247.649314268 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift") pod "swift-storage-0" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4") : configmap "swift-ring-files" not found
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.550241 4884 generic.go:334] "Generic (PLEG): container finished" podID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerID="8055a67f5ddd9c2922f210bcf891a4421187fa7a8f688eebcb988437cb3c541d" exitCode=0
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.550470 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" event={"ID":"19503544-c3b2-4d1f-9abb-8af9baac851a","Type":"ContainerDied","Data":"8055a67f5ddd9c2922f210bcf891a4421187fa7a8f688eebcb988437cb3c541d"}
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.554910 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"454fa1ac-19ca-4c44-b0fb-2c30039524a7","Type":"ContainerStarted","Data":"1f257c9ad37cf0a9ca938f7cf1d6a36a190fd223c5c58821c4e590d39987fda9"}
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.564153 4884 generic.go:334] "Generic (PLEG): container finished" podID="84790bab-dc32-4066-a074-aa087eb1941f" containerID="a251abc08c9527a6593859012011b63ba4bf38b1234148571c99b68268018d3e" exitCode=0
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.565544 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-74d7m" event={"ID":"84790bab-dc32-4066-a074-aa087eb1941f","Type":"ContainerDied","Data":"a251abc08c9527a6593859012011b63ba4bf38b1234148571c99b68268018d3e"}
Nov 28 15:40:07 crc kubenswrapper[4884]: I1128 15:40:07.587820 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-dn2ww"]
Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.105634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:08 crc kubenswrapper[4884]: E1128 15:40:08.106247 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
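[editor's note] Each failed SetUp ends with "No retries permitted until ... (durationBeforeRetry ...)", and the delays across this section double: 500ms, then 1s above, with 2s, 4s and 8s following below. A minimal sketch of that doubling schedule; the initial delay and factor are read off the log, while the cap is an assumption:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond // first durationBeforeRetry in the log
        const maxDelay = 2 * time.Minute // assumed upper bound, not from the log

        for attempt := 1; attempt <= 6; attempt++ {
            fmt.Printf("attempt %d failed: retry in %v\n", attempt, delay)
            delay *= 2 // doubling matches the 500ms -> 1s -> 2s -> 4s -> 8s sequence
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }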
"swift-ring-files" not found Nov 28 15:40:08 crc kubenswrapper[4884]: E1128 15:40:08.106545 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:40:08 crc kubenswrapper[4884]: E1128 15:40:08.106621 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift podName:772cd3d6-daa4-4494-9ce8-6182e011fbc4 nodeName:}" failed. No retries permitted until 2025-11-28 15:40:10.106604434 +0000 UTC m=+1249.669388235 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift") pod "swift-storage-0" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4") : configmap "swift-ring-files" not found Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.576579 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dn2ww" event={"ID":"a24664a4-3143-4fb4-b050-30b81e52a1f3","Type":"ContainerStarted","Data":"b9020560ea6fb1f0c6208acf3c5a06d0449a9f91352f6e755e1fb470e9b6e67d"} Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.580912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-74d7m" event={"ID":"84790bab-dc32-4066-a074-aa087eb1941f","Type":"ContainerStarted","Data":"33d362bf4ccafad300ddf951e246fdb4762f1ab1eab7513ba07a876a4f49be73"} Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.581009 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.587352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" event={"ID":"19503544-c3b2-4d1f-9abb-8af9baac851a","Type":"ContainerStarted","Data":"589b4f6507cc132b62d1fae62d2f303af7a7d6c6e68b173e7fe0518fce07931f"} Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.587565 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.592762 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6cdac902-b0e5-4f41-923c-07241207d730","Type":"ContainerStarted","Data":"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e"} Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.593900 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.593965 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6cdac902-b0e5-4f41-923c-07241207d730","Type":"ContainerStarted","Data":"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c"} Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.629186 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-74d7m" podStartSLOduration=3.037170211 podStartE2EDuration="3.629167413s" podCreationTimestamp="2025-11-28 15:40:05 +0000 UTC" firstStartedPulling="2025-11-28 15:40:06.363432259 +0000 UTC m=+1245.926216060" lastFinishedPulling="2025-11-28 15:40:06.955429461 +0000 UTC m=+1246.518213262" observedRunningTime="2025-11-28 15:40:08.625143205 +0000 UTC m=+1248.187927016" watchObservedRunningTime="2025-11-28 15:40:08.629167413 +0000 UTC m=+1248.191951224" Nov 28 
15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.673279 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.06405754 podStartE2EDuration="3.673242068s" podCreationTimestamp="2025-11-28 15:40:05 +0000 UTC" firstStartedPulling="2025-11-28 15:40:06.280250082 +0000 UTC m=+1245.843033903" lastFinishedPulling="2025-11-28 15:40:07.88943463 +0000 UTC m=+1247.452218431" observedRunningTime="2025-11-28 15:40:08.652297597 +0000 UTC m=+1248.215081438" watchObservedRunningTime="2025-11-28 15:40:08.673242068 +0000 UTC m=+1248.236025959" Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.684672 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" podStartSLOduration=3.139791923 podStartE2EDuration="3.684645946s" podCreationTimestamp="2025-11-28 15:40:05 +0000 UTC" firstStartedPulling="2025-11-28 15:40:06.411983313 +0000 UTC m=+1245.974767114" lastFinishedPulling="2025-11-28 15:40:06.956837336 +0000 UTC m=+1246.519621137" observedRunningTime="2025-11-28 15:40:08.678786053 +0000 UTC m=+1248.241569954" watchObservedRunningTime="2025-11-28 15:40:08.684645946 +0000 UTC m=+1248.247429767" Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.704855 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="699fdb63-e923-4029-86d5-e271fe0cf8eb" path="/var/lib/kubelet/pods/699fdb63-e923-4029-86d5-e271fe0cf8eb/volumes" Nov 28 15:40:08 crc kubenswrapper[4884]: I1128 15:40:08.705619 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e830a879-0bc6-424d-8d35-e3ea1be1590c" path="/var/lib/kubelet/pods/e830a879-0bc6-424d-8d35-e3ea1be1590c/volumes" Nov 28 15:40:10 crc kubenswrapper[4884]: I1128 15:40:10.159019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:10 crc kubenswrapper[4884]: E1128 15:40:10.159267 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 15:40:10 crc kubenswrapper[4884]: E1128 15:40:10.159423 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 15:40:10 crc kubenswrapper[4884]: E1128 15:40:10.159479 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift podName:772cd3d6-daa4-4494-9ce8-6182e011fbc4 nodeName:}" failed. No retries permitted until 2025-11-28 15:40:14.159460438 +0000 UTC m=+1253.722244239 (durationBeforeRetry 4s). 
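[editor's note] The pod_startup_latency_tracker entries above record podStartE2EDuration as observedRunningTime minus podCreationTimestamp, and podStartSLOduration as that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling): for dnsmasq-dns-8554648995-74d7m, 3.629s - 0.592s = 3.037s, exactly as logged. The same arithmetic also explains the absurd podStartSLOduration=-9223371995.161358 logged for openstack-galera-0 a few entries below: its lastFinishedPulling is the zero time, so the pull window saturates at the minimum time.Duration and the subtraction wraps around int64. A reproduction of both numbers, mimicking the arithmetic implied by the log rather than quoting the tracker's code:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }

        // dnsmasq-dns-8554648995-74d7m: the ordinary case.
        created := parse("2025-11-28 15:40:05 +0000 UTC")
        first := parse("2025-11-28 15:40:06.363432259 +0000 UTC")
        last := parse("2025-11-28 15:40:06.955429461 +0000 UTC")
        observed := parse("2025-11-28 15:40:08.629167413 +0000 UTC")
        e2e := observed.Sub(created)
        slo := e2e - last.Sub(first)
        fmt.Printf("e2e=%v slo=%.9fs\n", e2e, slo.Seconds()) // 3.629167413s, 3.037170211s

        // openstack-galera-0: lastFinishedPulling is the zero time, so the
        // pull window saturates at the minimum Duration; subtracting it
        // overflows int64 and wraps to the huge negative SLO duration.
        created = parse("2025-11-28 15:39:30 +0000 UTC")
        first = parse("2025-11-28 15:39:32.084847477 +0000 UTC")
        observed = parse("2025-11-28 15:40:11.693418203 +0000 UTC")
        var zero time.Time // "0001-01-01 00:00:00 +0000 UTC" in the log
        e2e = observed.Sub(created)
        slo = e2e - zero.Sub(first)
        fmt.Printf("e2e=%v slo=%.6fs\n", e2e, slo.Seconds()) // -9223371995.161358s
    }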
Nov 28 15:40:10 crc kubenswrapper[4884]: I1128 15:40:10.614648 4884 generic.go:334] "Generic (PLEG): container finished" podID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerID="e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7" exitCode=0
Nov 28 15:40:10 crc kubenswrapper[4884]: I1128 15:40:10.614717 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2b1d00ac-0efe-45af-9366-f5d302b86ccb","Type":"ContainerDied","Data":"e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7"}
Nov 28 15:40:11 crc kubenswrapper[4884]: I1128 15:40:11.624134 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dn2ww" event={"ID":"a24664a4-3143-4fb4-b050-30b81e52a1f3","Type":"ContainerStarted","Data":"c281696f1781e0ccfc031988e9712f4a3ced548d63c03915d8d3afac3ba9f01f"}
Nov 28 15:40:11 crc kubenswrapper[4884]: I1128 15:40:11.630714 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2b1d00ac-0efe-45af-9366-f5d302b86ccb","Type":"ContainerStarted","Data":"be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023"}
Nov 28 15:40:11 crc kubenswrapper[4884]: I1128 15:40:11.654659 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-dn2ww" podStartSLOduration=2.565168654 podStartE2EDuration="5.654642178s" podCreationTimestamp="2025-11-28 15:40:06 +0000 UTC" firstStartedPulling="2025-11-28 15:40:07.845586912 +0000 UTC m=+1247.408370713" lastFinishedPulling="2025-11-28 15:40:10.935060446 +0000 UTC m=+1250.497844237" observedRunningTime="2025-11-28 15:40:11.65143568 +0000 UTC m=+1251.214219481" watchObservedRunningTime="2025-11-28 15:40:11.654642178 +0000 UTC m=+1251.217425979"
Nov 28 15:40:11 crc kubenswrapper[4884]: I1128 15:40:11.693432 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371995.161358 podStartE2EDuration="41.693418203s" podCreationTimestamp="2025-11-28 15:39:30 +0000 UTC" firstStartedPulling="2025-11-28 15:39:32.084847477 +0000 UTC m=+1211.647631278" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:40:11.688263077 +0000 UTC m=+1251.251046898" watchObservedRunningTime="2025-11-28 15:40:11.693418203 +0000 UTC m=+1251.256202004"
Nov 28 15:40:13 crc kubenswrapper[4884]: I1128 15:40:13.091322 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:13 crc kubenswrapper[4884]: I1128 15:40:13.091717 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:13 crc kubenswrapper[4884]: I1128 15:40:13.136813 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:13 crc kubenswrapper[4884]: I1128 15:40:13.689250 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 28 15:40:14 crc kubenswrapper[4884]: I1128 15:40:14.224031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0"
Nov 28 15:40:14 crc kubenswrapper[4884]: E1128 15:40:14.224205 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 15:40:14 crc kubenswrapper[4884]: E1128 15:40:14.224238 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 28 15:40:14 crc kubenswrapper[4884]: E1128 15:40:14.224296 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift podName:772cd3d6-daa4-4494-9ce8-6182e011fbc4 nodeName:}" failed. No retries permitted until 2025-11-28 15:40:22.22427295 +0000 UTC m=+1261.787056761 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift") pod "swift-storage-0" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4") : configmap "swift-ring-files" not found
Nov 28 15:40:15 crc kubenswrapper[4884]: I1128 15:40:15.581460 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:15 crc kubenswrapper[4884]: I1128 15:40:15.738356 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb"
Nov 28 15:40:15 crc kubenswrapper[4884]: I1128 15:40:15.794266 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-74d7m"]
Nov 28 15:40:15 crc kubenswrapper[4884]: I1128 15:40:15.794594 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-74d7m" podUID="84790bab-dc32-4066-a074-aa087eb1941f" containerName="dnsmasq-dns" containerID="cri-o://33d362bf4ccafad300ddf951e246fdb4762f1ab1eab7513ba07a876a4f49be73" gracePeriod=10
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.687712 4884 generic.go:334] "Generic (PLEG): container finished" podID="84790bab-dc32-4066-a074-aa087eb1941f" containerID="33d362bf4ccafad300ddf951e246fdb4762f1ab1eab7513ba07a876a4f49be73" exitCode=0
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.698320 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-74d7m" event={"ID":"84790bab-dc32-4066-a074-aa087eb1941f","Type":"ContainerDied","Data":"33d362bf4ccafad300ddf951e246fdb4762f1ab1eab7513ba07a876a4f49be73"}
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.807754 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-74d7m"
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.986181 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-sb\") pod \"84790bab-dc32-4066-a074-aa087eb1941f\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") "
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.986258 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-dns-svc\") pod \"84790bab-dc32-4066-a074-aa087eb1941f\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") "
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.986298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph65v\" (UniqueName: \"kubernetes.io/projected/84790bab-dc32-4066-a074-aa087eb1941f-kube-api-access-ph65v\") pod \"84790bab-dc32-4066-a074-aa087eb1941f\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") "
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.986383 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-nb\") pod \"84790bab-dc32-4066-a074-aa087eb1941f\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") "
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.986428 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-config\") pod \"84790bab-dc32-4066-a074-aa087eb1941f\" (UID: \"84790bab-dc32-4066-a074-aa087eb1941f\") "
Nov 28 15:40:16 crc kubenswrapper[4884]: I1128 15:40:16.992360 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84790bab-dc32-4066-a074-aa087eb1941f-kube-api-access-ph65v" (OuterVolumeSpecName: "kube-api-access-ph65v") pod "84790bab-dc32-4066-a074-aa087eb1941f" (UID: "84790bab-dc32-4066-a074-aa087eb1941f"). InnerVolumeSpecName "kube-api-access-ph65v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.023718 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "84790bab-dc32-4066-a074-aa087eb1941f" (UID: "84790bab-dc32-4066-a074-aa087eb1941f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.029315 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "84790bab-dc32-4066-a074-aa087eb1941f" (UID: "84790bab-dc32-4066-a074-aa087eb1941f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.029645 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-config" (OuterVolumeSpecName: "config") pod "84790bab-dc32-4066-a074-aa087eb1941f" (UID: "84790bab-dc32-4066-a074-aa087eb1941f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.038501 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "84790bab-dc32-4066-a074-aa087eb1941f" (UID: "84790bab-dc32-4066-a074-aa087eb1941f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.088228 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.088262 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.088272 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.088282 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph65v\" (UniqueName: \"kubernetes.io/projected/84790bab-dc32-4066-a074-aa087eb1941f-kube-api-access-ph65v\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.088291 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84790bab-dc32-4066-a074-aa087eb1941f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.697973 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-74d7m" event={"ID":"84790bab-dc32-4066-a074-aa087eb1941f","Type":"ContainerDied","Data":"059ca57656a624aa35f1601035d8b34fe0c576ddca40b32d4929a569a11ddf7b"} Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.698322 4884 scope.go:117] "RemoveContainer" containerID="33d362bf4ccafad300ddf951e246fdb4762f1ab1eab7513ba07a876a4f49be73" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.698045 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-74d7m" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.718527 4884 scope.go:117] "RemoveContainer" containerID="a251abc08c9527a6593859012011b63ba4bf38b1234148571c99b68268018d3e" Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.735759 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-74d7m"] Nov 28 15:40:17 crc kubenswrapper[4884]: I1128 15:40:17.748536 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-74d7m"] Nov 28 15:40:18 crc kubenswrapper[4884]: I1128 15:40:18.701665 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84790bab-dc32-4066-a074-aa087eb1941f" path="/var/lib/kubelet/pods/84790bab-dc32-4066-a074-aa087eb1941f/volumes" Nov 28 15:40:20 crc kubenswrapper[4884]: I1128 15:40:20.564418 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 15:40:20 crc kubenswrapper[4884]: I1128 15:40:20.724737 4884 generic.go:334] "Generic (PLEG): container finished" podID="a24664a4-3143-4fb4-b050-30b81e52a1f3" containerID="c281696f1781e0ccfc031988e9712f4a3ced548d63c03915d8d3afac3ba9f01f" exitCode=0 Nov 28 15:40:20 crc kubenswrapper[4884]: I1128 15:40:20.724910 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dn2ww" event={"ID":"a24664a4-3143-4fb4-b050-30b81e52a1f3","Type":"ContainerDied","Data":"c281696f1781e0ccfc031988e9712f4a3ced548d63c03915d8d3afac3ba9f01f"} Nov 28 15:40:21 crc kubenswrapper[4884]: I1128 15:40:21.409786 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 15:40:21 crc kubenswrapper[4884]: I1128 15:40:21.409840 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 15:40:21 crc kubenswrapper[4884]: I1128 15:40:21.453793 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 15:40:21 crc kubenswrapper[4884]: I1128 15:40:21.790805 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.183032 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-dn2ww" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.317716 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-swiftconf\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.317806 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-combined-ca-bundle\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.317846 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-ring-data-devices\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.317883 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnjhl\" (UniqueName: \"kubernetes.io/projected/a24664a4-3143-4fb4-b050-30b81e52a1f3-kube-api-access-cnjhl\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.317958 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-scripts\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.317985 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a24664a4-3143-4fb4-b050-30b81e52a1f3-etc-swift\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.318034 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-dispersionconf\") pod \"a24664a4-3143-4fb4-b050-30b81e52a1f3\" (UID: \"a24664a4-3143-4fb4-b050-30b81e52a1f3\") " Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.318269 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.319583 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.319735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a24664a4-3143-4fb4-b050-30b81e52a1f3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.324979 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"swift-storage-0\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") " pod="openstack/swift-storage-0" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.327202 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.332268 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a24664a4-3143-4fb4-b050-30b81e52a1f3-kube-api-access-cnjhl" (OuterVolumeSpecName: "kube-api-access-cnjhl") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "kube-api-access-cnjhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.346777 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.350157 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-scripts" (OuterVolumeSpecName: "scripts") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.362426 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "a24664a4-3143-4fb4-b050-30b81e52a1f3" (UID: "a24664a4-3143-4fb4-b050-30b81e52a1f3"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420288 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnjhl\" (UniqueName: \"kubernetes.io/projected/a24664a4-3143-4fb4-b050-30b81e52a1f3-kube-api-access-cnjhl\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420319 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420328 4884 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a24664a4-3143-4fb4-b050-30b81e52a1f3-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420339 4884 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420347 4884 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420356 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24664a4-3143-4fb4-b050-30b81e52a1f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.420364 4884 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a24664a4-3143-4fb4-b050-30b81e52a1f3-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.574137 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.744993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-dn2ww" event={"ID":"a24664a4-3143-4fb4-b050-30b81e52a1f3","Type":"ContainerDied","Data":"b9020560ea6fb1f0c6208acf3c5a06d0449a9f91352f6e755e1fb470e9b6e67d"} Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.745381 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9020560ea6fb1f0c6208acf3c5a06d0449a9f91352f6e755e1fb470e9b6e67d" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.745026 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-dn2ww" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.777015 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-2mmr5"] Nov 28 15:40:22 crc kubenswrapper[4884]: E1128 15:40:22.777349 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a24664a4-3143-4fb4-b050-30b81e52a1f3" containerName="swift-ring-rebalance" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.777360 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a24664a4-3143-4fb4-b050-30b81e52a1f3" containerName="swift-ring-rebalance" Nov 28 15:40:22 crc kubenswrapper[4884]: E1128 15:40:22.777374 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84790bab-dc32-4066-a074-aa087eb1941f" containerName="dnsmasq-dns" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.777380 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="84790bab-dc32-4066-a074-aa087eb1941f" containerName="dnsmasq-dns" Nov 28 15:40:22 crc kubenswrapper[4884]: E1128 15:40:22.777399 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84790bab-dc32-4066-a074-aa087eb1941f" containerName="init" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.777407 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="84790bab-dc32-4066-a074-aa087eb1941f" containerName="init" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.779738 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a24664a4-3143-4fb4-b050-30b81e52a1f3" containerName="swift-ring-rebalance" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.779767 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="84790bab-dc32-4066-a074-aa087eb1941f" containerName="dnsmasq-dns" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.780422 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.797466 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-2mmr5"] Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.909468 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:40:22 crc kubenswrapper[4884]: W1128 15:40:22.916067 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod772cd3d6_daa4_4494_9ce8_6182e011fbc4.slice/crio-504b2bb90da5e9230841c5ff12d642bce3a47453c25abfccf8fcd39258473aee WatchSource:0}: Error finding container 504b2bb90da5e9230841c5ff12d642bce3a47453c25abfccf8fcd39258473aee: Status 404 returned error can't find the container with id 504b2bb90da5e9230841c5ff12d642bce3a47453c25abfccf8fcd39258473aee Nov 28 15:40:22 crc kubenswrapper[4884]: I1128 15:40:22.929025 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwvsx\" (UniqueName: \"kubernetes.io/projected/23bd4715-1fa9-48a3-8bc0-7d92059de47d-kube-api-access-fwvsx\") pod \"keystone-db-create-2mmr5\" (UID: \"23bd4715-1fa9-48a3-8bc0-7d92059de47d\") " pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.025561 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-z89zk"] Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.026603 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-z89zk" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.031169 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwvsx\" (UniqueName: \"kubernetes.io/projected/23bd4715-1fa9-48a3-8bc0-7d92059de47d-kube-api-access-fwvsx\") pod \"keystone-db-create-2mmr5\" (UID: \"23bd4715-1fa9-48a3-8bc0-7d92059de47d\") " pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.032854 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-z89zk"] Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.059792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwvsx\" (UniqueName: \"kubernetes.io/projected/23bd4715-1fa9-48a3-8bc0-7d92059de47d-kube-api-access-fwvsx\") pod \"keystone-db-create-2mmr5\" (UID: \"23bd4715-1fa9-48a3-8bc0-7d92059de47d\") " pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.111148 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.132723 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtnrz\" (UniqueName: \"kubernetes.io/projected/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1-kube-api-access-dtnrz\") pod \"placement-db-create-z89zk\" (UID: \"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1\") " pod="openstack/placement-db-create-z89zk" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.234342 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtnrz\" (UniqueName: \"kubernetes.io/projected/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1-kube-api-access-dtnrz\") pod \"placement-db-create-z89zk\" (UID: \"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1\") " pod="openstack/placement-db-create-z89zk" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.249770 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtnrz\" (UniqueName: \"kubernetes.io/projected/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1-kube-api-access-dtnrz\") pod \"placement-db-create-z89zk\" (UID: \"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1\") " pod="openstack/placement-db-create-z89zk" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.361152 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z89zk" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.415181 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-ml6x7"] Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.417008 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.438406 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-ml6x7"] Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.463868 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvvtk\" (UniqueName: \"kubernetes.io/projected/f45dbb12-57af-4477-8456-37834db54a0c-kube-api-access-hvvtk\") pod \"glance-db-create-ml6x7\" (UID: \"f45dbb12-57af-4477-8456-37834db54a0c\") " pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.566001 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvvtk\" (UniqueName: \"kubernetes.io/projected/f45dbb12-57af-4477-8456-37834db54a0c-kube-api-access-hvvtk\") pod \"glance-db-create-ml6x7\" (UID: \"f45dbb12-57af-4477-8456-37834db54a0c\") " pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.625681 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-z89zk"] Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.715227 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvvtk\" (UniqueName: \"kubernetes.io/projected/f45dbb12-57af-4477-8456-37834db54a0c-kube-api-access-hvvtk\") pod \"glance-db-create-ml6x7\" (UID: \"f45dbb12-57af-4477-8456-37834db54a0c\") " pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:23 crc kubenswrapper[4884]: W1128 15:40:23.722946 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23bd4715_1fa9_48a3_8bc0_7d92059de47d.slice/crio-72af97616200c68bdd5e99e40f8474e91c64ef615412a0e7b13b5741f98139ac WatchSource:0}: Error finding container 72af97616200c68bdd5e99e40f8474e91c64ef615412a0e7b13b5741f98139ac: Status 404 returned error can't find the container with id 72af97616200c68bdd5e99e40f8474e91c64ef615412a0e7b13b5741f98139ac Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.724530 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-2mmr5"] Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.753827 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z89zk" event={"ID":"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1","Type":"ContainerStarted","Data":"0033e4c86c108724c07dea6577058de696e6554a978f7a2a42bece05b352e709"} Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.754855 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"504b2bb90da5e9230841c5ff12d642bce3a47453c25abfccf8fcd39258473aee"} Nov 28 15:40:23 crc kubenswrapper[4884]: I1128 15:40:23.756163 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2mmr5" event={"ID":"23bd4715-1fa9-48a3-8bc0-7d92059de47d","Type":"ContainerStarted","Data":"72af97616200c68bdd5e99e40f8474e91c64ef615412a0e7b13b5741f98139ac"} Nov 28 15:40:24 crc kubenswrapper[4884]: I1128 15:40:24.007450 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:24 crc kubenswrapper[4884]: I1128 15:40:24.441116 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-ml6x7"] Nov 28 15:40:24 crc kubenswrapper[4884]: W1128 15:40:24.445663 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf45dbb12_57af_4477_8456_37834db54a0c.slice/crio-17493dd78bebbcc459b1fdd98d822810bfa8e955b474222caba86db42622382a WatchSource:0}: Error finding container 17493dd78bebbcc459b1fdd98d822810bfa8e955b474222caba86db42622382a: Status 404 returned error can't find the container with id 17493dd78bebbcc459b1fdd98d822810bfa8e955b474222caba86db42622382a Nov 28 15:40:24 crc kubenswrapper[4884]: I1128 15:40:24.767379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-ml6x7" event={"ID":"f45dbb12-57af-4477-8456-37834db54a0c","Type":"ContainerStarted","Data":"17493dd78bebbcc459b1fdd98d822810bfa8e955b474222caba86db42622382a"} Nov 28 15:40:26 crc kubenswrapper[4884]: I1128 15:40:26.785574 4884 generic.go:334] "Generic (PLEG): container finished" podID="0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1" containerID="2f8389f9ec7174a8e9d82dc2ac13c93505a07fdd6567c79634dda479b20e57b0" exitCode=0 Nov 28 15:40:26 crc kubenswrapper[4884]: I1128 15:40:26.785662 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z89zk" event={"ID":"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1","Type":"ContainerDied","Data":"2f8389f9ec7174a8e9d82dc2ac13c93505a07fdd6567c79634dda479b20e57b0"} Nov 28 15:40:26 crc kubenswrapper[4884]: I1128 15:40:26.791841 4884 generic.go:334] "Generic (PLEG): container finished" podID="23bd4715-1fa9-48a3-8bc0-7d92059de47d" containerID="9dd9057f62ee5c9c6275f559b3176fdfa300b26d1bbfeff07c5f5c7e8c62c3c1" exitCode=0 Nov 28 15:40:26 crc kubenswrapper[4884]: I1128 15:40:26.791990 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2mmr5" event={"ID":"23bd4715-1fa9-48a3-8bc0-7d92059de47d","Type":"ContainerDied","Data":"9dd9057f62ee5c9c6275f559b3176fdfa300b26d1bbfeff07c5f5c7e8c62c3c1"} Nov 28 15:40:26 crc kubenswrapper[4884]: I1128 15:40:26.794693 4884 generic.go:334] "Generic (PLEG): container finished" podID="f45dbb12-57af-4477-8456-37834db54a0c" containerID="1d304903df4db4958b0ffe4823917fe92702684454004f9d45c4f96d73b84b9a" exitCode=0 Nov 28 15:40:26 crc kubenswrapper[4884]: I1128 15:40:26.794746 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-ml6x7" event={"ID":"f45dbb12-57af-4477-8456-37834db54a0c","Type":"ContainerDied","Data":"1d304903df4db4958b0ffe4823917fe92702684454004f9d45c4f96d73b84b9a"} Nov 28 15:40:27 crc kubenswrapper[4884]: I1128 15:40:27.809749 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a"} Nov 28 15:40:27 crc kubenswrapper[4884]: I1128 15:40:27.810187 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0"} Nov 28 15:40:27 crc kubenswrapper[4884]: I1128 15:40:27.810216 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8"} Nov 28 15:40:27 crc kubenswrapper[4884]: I1128 15:40:27.810238 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09"} Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.281283 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.286408 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-z89zk" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.311839 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.350845 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvvtk\" (UniqueName: \"kubernetes.io/projected/f45dbb12-57af-4477-8456-37834db54a0c-kube-api-access-hvvtk\") pod \"f45dbb12-57af-4477-8456-37834db54a0c\" (UID: \"f45dbb12-57af-4477-8456-37834db54a0c\") " Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.351033 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwvsx\" (UniqueName: \"kubernetes.io/projected/23bd4715-1fa9-48a3-8bc0-7d92059de47d-kube-api-access-fwvsx\") pod \"23bd4715-1fa9-48a3-8bc0-7d92059de47d\" (UID: \"23bd4715-1fa9-48a3-8bc0-7d92059de47d\") " Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.351301 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtnrz\" (UniqueName: \"kubernetes.io/projected/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1-kube-api-access-dtnrz\") pod \"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1\" (UID: \"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1\") " Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.357245 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1-kube-api-access-dtnrz" (OuterVolumeSpecName: "kube-api-access-dtnrz") pod "0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1" (UID: "0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1"). InnerVolumeSpecName "kube-api-access-dtnrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.357877 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f45dbb12-57af-4477-8456-37834db54a0c-kube-api-access-hvvtk" (OuterVolumeSpecName: "kube-api-access-hvvtk") pod "f45dbb12-57af-4477-8456-37834db54a0c" (UID: "f45dbb12-57af-4477-8456-37834db54a0c"). InnerVolumeSpecName "kube-api-access-hvvtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.358799 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23bd4715-1fa9-48a3-8bc0-7d92059de47d-kube-api-access-fwvsx" (OuterVolumeSpecName: "kube-api-access-fwvsx") pod "23bd4715-1fa9-48a3-8bc0-7d92059de47d" (UID: "23bd4715-1fa9-48a3-8bc0-7d92059de47d"). InnerVolumeSpecName "kube-api-access-fwvsx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.453476 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtnrz\" (UniqueName: \"kubernetes.io/projected/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1-kube-api-access-dtnrz\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.453511 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvvtk\" (UniqueName: \"kubernetes.io/projected/f45dbb12-57af-4477-8456-37834db54a0c-kube-api-access-hvvtk\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.453560 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwvsx\" (UniqueName: \"kubernetes.io/projected/23bd4715-1fa9-48a3-8bc0-7d92059de47d-kube-api-access-fwvsx\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.724690 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tnsft" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" probeResult="failure" output=< Nov 28 15:40:28 crc kubenswrapper[4884]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 15:40:28 crc kubenswrapper[4884]: > Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.819922 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2mmr5" event={"ID":"23bd4715-1fa9-48a3-8bc0-7d92059de47d","Type":"ContainerDied","Data":"72af97616200c68bdd5e99e40f8474e91c64ef615412a0e7b13b5741f98139ac"} Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.819973 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72af97616200c68bdd5e99e40f8474e91c64ef615412a0e7b13b5741f98139ac" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.820129 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2mmr5" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.822117 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-ml6x7" event={"ID":"f45dbb12-57af-4477-8456-37834db54a0c","Type":"ContainerDied","Data":"17493dd78bebbcc459b1fdd98d822810bfa8e955b474222caba86db42622382a"} Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.822140 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-ml6x7" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.822154 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17493dd78bebbcc459b1fdd98d822810bfa8e955b474222caba86db42622382a" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.824155 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-z89zk" event={"ID":"0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1","Type":"ContainerDied","Data":"0033e4c86c108724c07dea6577058de696e6554a978f7a2a42bece05b352e709"} Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.824206 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0033e4c86c108724c07dea6577058de696e6554a978f7a2a42bece05b352e709" Nov 28 15:40:28 crc kubenswrapper[4884]: I1128 15:40:28.824209 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-z89zk" Nov 28 15:40:29 crc kubenswrapper[4884]: I1128 15:40:29.837657 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d"} Nov 28 15:40:31 crc kubenswrapper[4884]: I1128 15:40:31.873268 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a"} Nov 28 15:40:31 crc kubenswrapper[4884]: I1128 15:40:31.873691 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754"} Nov 28 15:40:32 crc kubenswrapper[4884]: I1128 15:40:32.885410 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83"} Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.496983 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-32ab-account-create-jpj8s"] Nov 28 15:40:33 crc kubenswrapper[4884]: E1128 15:40:33.497655 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23bd4715-1fa9-48a3-8bc0-7d92059de47d" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.497678 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="23bd4715-1fa9-48a3-8bc0-7d92059de47d" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: E1128 15:40:33.497694 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f45dbb12-57af-4477-8456-37834db54a0c" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.497701 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f45dbb12-57af-4477-8456-37834db54a0c" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: E1128 15:40:33.497713 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.497719 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.497948 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.497969 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="23bd4715-1fa9-48a3-8bc0-7d92059de47d" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.497984 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f45dbb12-57af-4477-8456-37834db54a0c" containerName="mariadb-database-create" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.498573 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.501017 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.520588 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-32ab-account-create-jpj8s"] Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.644320 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpgtp\" (UniqueName: \"kubernetes.io/projected/ad44ad3b-6966-47ed-ac5d-fb310b053dbc-kube-api-access-tpgtp\") pod \"glance-32ab-account-create-jpj8s\" (UID: \"ad44ad3b-6966-47ed-ac5d-fb310b053dbc\") " pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.745746 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpgtp\" (UniqueName: \"kubernetes.io/projected/ad44ad3b-6966-47ed-ac5d-fb310b053dbc-kube-api-access-tpgtp\") pod \"glance-32ab-account-create-jpj8s\" (UID: \"ad44ad3b-6966-47ed-ac5d-fb310b053dbc\") " pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.762556 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tnsft" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" probeResult="failure" output=< Nov 28 15:40:33 crc kubenswrapper[4884]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 15:40:33 crc kubenswrapper[4884]: > Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.766681 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.779186 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpgtp\" (UniqueName: \"kubernetes.io/projected/ad44ad3b-6966-47ed-ac5d-fb310b053dbc-kube-api-access-tpgtp\") pod \"glance-32ab-account-create-jpj8s\" (UID: \"ad44ad3b-6966-47ed-ac5d-fb310b053dbc\") " pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.805894 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-vm8q9" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.822129 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.943106 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff"} Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.943149 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5"} Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.943184 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024"} Nov 28 15:40:33 crc kubenswrapper[4884]: I1128 15:40:33.943197 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f"} Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.048438 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tnsft-config-hn99k"] Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.052174 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.054692 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.064690 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnsft-config-hn99k"] Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.066742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtngq\" (UniqueName: \"kubernetes.io/projected/d6b5b439-e458-4a0f-89eb-e3fef48d5639-kube-api-access-gtngq\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.066783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.066829 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-scripts\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.066857 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-log-ovn\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: 
\"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.066919 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run-ovn\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.066966 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-additional-scripts\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.167721 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtngq\" (UniqueName: \"kubernetes.io/projected/d6b5b439-e458-4a0f-89eb-e3fef48d5639-kube-api-access-gtngq\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.167759 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.167816 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-scripts\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.167841 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-log-ovn\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.167879 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run-ovn\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.167907 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-additional-scripts\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.168605 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run\") pod 
\"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.168661 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run-ovn\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.168668 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-log-ovn\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.168739 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-additional-scripts\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.170644 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-scripts\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.205933 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtngq\" (UniqueName: \"kubernetes.io/projected/d6b5b439-e458-4a0f-89eb-e3fef48d5639-kube-api-access-gtngq\") pod \"ovn-controller-tnsft-config-hn99k\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.381217 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.474817 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-32ab-account-create-jpj8s"] Nov 28 15:40:34 crc kubenswrapper[4884]: W1128 15:40:34.481559 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad44ad3b_6966_47ed_ac5d_fb310b053dbc.slice/crio-9ab7082251478d229306508b4127163c8572004bf0e61202d7d4959140640650 WatchSource:0}: Error finding container 9ab7082251478d229306508b4127163c8572004bf0e61202d7d4959140640650: Status 404 returned error can't find the container with id 9ab7082251478d229306508b4127163c8572004bf0e61202d7d4959140640650 Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.835513 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnsft-config-hn99k"] Nov 28 15:40:34 crc kubenswrapper[4884]: W1128 15:40:34.840768 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6b5b439_e458_4a0f_89eb_e3fef48d5639.slice/crio-d756942984e200eaf6d7cbe75e87d25e6871356b4690221e19209808bedc94a6 WatchSource:0}: Error finding container d756942984e200eaf6d7cbe75e87d25e6871356b4690221e19209808bedc94a6: Status 404 returned error can't find the container with id d756942984e200eaf6d7cbe75e87d25e6871356b4690221e19209808bedc94a6 Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.959616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f"} Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.959654 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c"} Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.959663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerStarted","Data":"bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe"} Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.963585 4884 generic.go:334] "Generic (PLEG): container finished" podID="ad44ad3b-6966-47ed-ac5d-fb310b053dbc" containerID="e60d16879a4e31788c2d4d904429d05b12572a36e15203f8e2455f81a2057497" exitCode=0 Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.963633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-32ab-account-create-jpj8s" event={"ID":"ad44ad3b-6966-47ed-ac5d-fb310b053dbc","Type":"ContainerDied","Data":"e60d16879a4e31788c2d4d904429d05b12572a36e15203f8e2455f81a2057497"} Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.963649 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-32ab-account-create-jpj8s" event={"ID":"ad44ad3b-6966-47ed-ac5d-fb310b053dbc","Type":"ContainerStarted","Data":"9ab7082251478d229306508b4127163c8572004bf0e61202d7d4959140640650"} Nov 28 15:40:34 crc kubenswrapper[4884]: I1128 15:40:34.975533 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft-config-hn99k" 
event={"ID":"d6b5b439-e458-4a0f-89eb-e3fef48d5639","Type":"ContainerStarted","Data":"d756942984e200eaf6d7cbe75e87d25e6871356b4690221e19209808bedc94a6"} Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.001574 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=19.806453992 podStartE2EDuration="30.001554517s" podCreationTimestamp="2025-11-28 15:40:05 +0000 UTC" firstStartedPulling="2025-11-28 15:40:22.91863118 +0000 UTC m=+1262.481414981" lastFinishedPulling="2025-11-28 15:40:33.113731705 +0000 UTC m=+1272.676515506" observedRunningTime="2025-11-28 15:40:34.994224509 +0000 UTC m=+1274.557008320" watchObservedRunningTime="2025-11-28 15:40:35.001554517 +0000 UTC m=+1274.564338328" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.244397 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kvqlh"] Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.245793 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.248154 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.254222 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kvqlh"] Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.388958 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.389011 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-config\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.389062 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.389228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.389275 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58wjp\" (UniqueName: \"kubernetes.io/projected/4c4f3feb-a977-4c65-8c72-b638fa15027a-kube-api-access-58wjp\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.389468 
4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.491669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.491842 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.491873 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-config\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.491929 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.491964 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.491988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58wjp\" (UniqueName: \"kubernetes.io/projected/4c4f3feb-a977-4c65-8c72-b638fa15027a-kube-api-access-58wjp\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.493124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.493143 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.493124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.493343 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.494204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-config\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.517708 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58wjp\" (UniqueName: \"kubernetes.io/projected/4c4f3feb-a977-4c65-8c72-b638fa15027a-kube-api-access-58wjp\") pod \"dnsmasq-dns-6d5b6d6b67-kvqlh\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.560631 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.824470 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kvqlh"] Nov 28 15:40:35 crc kubenswrapper[4884]: I1128 15:40:35.984300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" event={"ID":"4c4f3feb-a977-4c65-8c72-b638fa15027a","Type":"ContainerStarted","Data":"c3212654d82c894f719afda98bc6acb51e1f82a376691311cb4bfc935f36eb7b"} Nov 28 15:40:36 crc kubenswrapper[4884]: I1128 15:40:36.402829 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:36 crc kubenswrapper[4884]: I1128 15:40:36.511410 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpgtp\" (UniqueName: \"kubernetes.io/projected/ad44ad3b-6966-47ed-ac5d-fb310b053dbc-kube-api-access-tpgtp\") pod \"ad44ad3b-6966-47ed-ac5d-fb310b053dbc\" (UID: \"ad44ad3b-6966-47ed-ac5d-fb310b053dbc\") " Nov 28 15:40:36 crc kubenswrapper[4884]: I1128 15:40:36.516423 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad44ad3b-6966-47ed-ac5d-fb310b053dbc-kube-api-access-tpgtp" (OuterVolumeSpecName: "kube-api-access-tpgtp") pod "ad44ad3b-6966-47ed-ac5d-fb310b053dbc" (UID: "ad44ad3b-6966-47ed-ac5d-fb310b053dbc"). InnerVolumeSpecName "kube-api-access-tpgtp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:36 crc kubenswrapper[4884]: I1128 15:40:36.613385 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpgtp\" (UniqueName: \"kubernetes.io/projected/ad44ad3b-6966-47ed-ac5d-fb310b053dbc-kube-api-access-tpgtp\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:36 crc kubenswrapper[4884]: I1128 15:40:36.995493 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerID="e848dab97da0f5a302adae56e051c89f8c322c97e30a1719d8d3a88bf59eabd4" exitCode=0 Nov 28 15:40:36 crc kubenswrapper[4884]: I1128 15:40:36.995552 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" event={"ID":"4c4f3feb-a977-4c65-8c72-b638fa15027a","Type":"ContainerDied","Data":"e848dab97da0f5a302adae56e051c89f8c322c97e30a1719d8d3a88bf59eabd4"} Nov 28 15:40:37 crc kubenswrapper[4884]: I1128 15:40:36.999503 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-32ab-account-create-jpj8s" event={"ID":"ad44ad3b-6966-47ed-ac5d-fb310b053dbc","Type":"ContainerDied","Data":"9ab7082251478d229306508b4127163c8572004bf0e61202d7d4959140640650"} Nov 28 15:40:37 crc kubenswrapper[4884]: I1128 15:40:36.999532 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ab7082251478d229306508b4127163c8572004bf0e61202d7d4959140640650" Nov 28 15:40:37 crc kubenswrapper[4884]: I1128 15:40:36.999589 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-32ab-account-create-jpj8s" Nov 28 15:40:37 crc kubenswrapper[4884]: I1128 15:40:37.003445 4884 generic.go:334] "Generic (PLEG): container finished" podID="d6b5b439-e458-4a0f-89eb-e3fef48d5639" containerID="28ef434d21a23e4515e4b14857316f3404e4fe7b7948ccd1d761ac4aa596a8b7" exitCode=0 Nov 28 15:40:37 crc kubenswrapper[4884]: I1128 15:40:37.003492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft-config-hn99k" event={"ID":"d6b5b439-e458-4a0f-89eb-e3fef48d5639","Type":"ContainerDied","Data":"28ef434d21a23e4515e4b14857316f3404e4fe7b7948ccd1d761ac4aa596a8b7"} Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.012070 4884 generic.go:334] "Generic (PLEG): container finished" podID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerID="261aa2bf3aa707f9ac09db8380136376399868fb0e2b7fcc555077a346a02c2f" exitCode=0 Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.012160 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a5d81bd-3b99-4aa6-82dc-2969295dce39","Type":"ContainerDied","Data":"261aa2bf3aa707f9ac09db8380136376399868fb0e2b7fcc555077a346a02c2f"} Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.015836 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" event={"ID":"4c4f3feb-a977-4c65-8c72-b638fa15027a","Type":"ContainerStarted","Data":"6055a9d2a5ed4e74724da67b6844fa4ecb94ae7d26d6c9b96565e587ae4299c3"} Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.073562 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" podStartSLOduration=3.073346901 podStartE2EDuration="3.073346901s" podCreationTimestamp="2025-11-28 15:40:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:40:38.066918854 +0000 UTC 
m=+1277.629702665" watchObservedRunningTime="2025-11-28 15:40:38.073346901 +0000 UTC m=+1277.636130702" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.335131 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470266 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run\") pod \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470378 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtngq\" (UniqueName: \"kubernetes.io/projected/d6b5b439-e458-4a0f-89eb-e3fef48d5639-kube-api-access-gtngq\") pod \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470406 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-log-ovn\") pod \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470455 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-scripts\") pod \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470545 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-additional-scripts\") pod \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470600 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run-ovn\") pod \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\" (UID: \"d6b5b439-e458-4a0f-89eb-e3fef48d5639\") " Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470949 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d6b5b439-e458-4a0f-89eb-e3fef48d5639" (UID: "d6b5b439-e458-4a0f-89eb-e3fef48d5639"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.470979 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run" (OuterVolumeSpecName: "var-run") pod "d6b5b439-e458-4a0f-89eb-e3fef48d5639" (UID: "d6b5b439-e458-4a0f-89eb-e3fef48d5639"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.471670 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d6b5b439-e458-4a0f-89eb-e3fef48d5639" (UID: "d6b5b439-e458-4a0f-89eb-e3fef48d5639"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.472223 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d6b5b439-e458-4a0f-89eb-e3fef48d5639" (UID: "d6b5b439-e458-4a0f-89eb-e3fef48d5639"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.473347 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-scripts" (OuterVolumeSpecName: "scripts") pod "d6b5b439-e458-4a0f-89eb-e3fef48d5639" (UID: "d6b5b439-e458-4a0f-89eb-e3fef48d5639"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.476193 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6b5b439-e458-4a0f-89eb-e3fef48d5639-kube-api-access-gtngq" (OuterVolumeSpecName: "kube-api-access-gtngq") pod "d6b5b439-e458-4a0f-89eb-e3fef48d5639" (UID: "d6b5b439-e458-4a0f-89eb-e3fef48d5639"). InnerVolumeSpecName "kube-api-access-gtngq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.572607 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtngq\" (UniqueName: \"kubernetes.io/projected/d6b5b439-e458-4a0f-89eb-e3fef48d5639-kube-api-access-gtngq\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.572916 4884 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.572933 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.572946 4884 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d6b5b439-e458-4a0f-89eb-e3fef48d5639-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.572961 4884 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.572973 4884 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d6b5b439-e458-4a0f-89eb-e3fef48d5639-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.650642 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/glance-db-sync-m6njq"] Nov 28 15:40:38 crc kubenswrapper[4884]: E1128 15:40:38.651029 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad44ad3b-6966-47ed-ac5d-fb310b053dbc" containerName="mariadb-account-create" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.651046 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad44ad3b-6966-47ed-ac5d-fb310b053dbc" containerName="mariadb-account-create" Nov 28 15:40:38 crc kubenswrapper[4884]: E1128 15:40:38.651060 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6b5b439-e458-4a0f-89eb-e3fef48d5639" containerName="ovn-config" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.651066 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6b5b439-e458-4a0f-89eb-e3fef48d5639" containerName="ovn-config" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.651257 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad44ad3b-6966-47ed-ac5d-fb310b053dbc" containerName="mariadb-account-create" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.651277 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6b5b439-e458-4a0f-89eb-e3fef48d5639" containerName="ovn-config" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.651805 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.653753 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmtm9" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.654422 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.660512 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-m6njq"] Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.746532 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-tnsft" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.775999 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc2gz\" (UniqueName: \"kubernetes.io/projected/54b9e833-6baa-48ca-9b62-5b288f49c020-kube-api-access-jc2gz\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.776084 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-db-sync-config-data\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.776242 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-combined-ca-bundle\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.776279 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-config-data\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.878147 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc2gz\" (UniqueName: \"kubernetes.io/projected/54b9e833-6baa-48ca-9b62-5b288f49c020-kube-api-access-jc2gz\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.878237 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-db-sync-config-data\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.878338 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-combined-ca-bundle\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.878359 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-config-data\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.882378 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-db-sync-config-data\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.882483 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-config-data\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.883615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-combined-ca-bundle\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.906775 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc2gz\" (UniqueName: \"kubernetes.io/projected/54b9e833-6baa-48ca-9b62-5b288f49c020-kube-api-access-jc2gz\") pod \"glance-db-sync-m6njq\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:38 crc kubenswrapper[4884]: I1128 15:40:38.998017 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-m6njq" Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.044656 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft-config-hn99k" event={"ID":"d6b5b439-e458-4a0f-89eb-e3fef48d5639","Type":"ContainerDied","Data":"d756942984e200eaf6d7cbe75e87d25e6871356b4690221e19209808bedc94a6"} Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.044700 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d756942984e200eaf6d7cbe75e87d25e6871356b4690221e19209808bedc94a6" Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.044744 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnsft-config-hn99k" Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.060996 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a5d81bd-3b99-4aa6-82dc-2969295dce39","Type":"ContainerStarted","Data":"343440ad4aebfa33b7450af592a1090831143203f96732b5193c19160e303a6f"} Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.061081 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.061350 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.116341 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.128881035 podStartE2EDuration="1m12.116320607s" podCreationTimestamp="2025-11-28 15:39:27 +0000 UTC" firstStartedPulling="2025-11-28 15:39:29.505415617 +0000 UTC m=+1209.068199418" lastFinishedPulling="2025-11-28 15:40:04.492855179 +0000 UTC m=+1244.055638990" observedRunningTime="2025-11-28 15:40:39.101645399 +0000 UTC m=+1278.664429200" watchObservedRunningTime="2025-11-28 15:40:39.116320607 +0000 UTC m=+1278.679104408" Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.462554 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tnsft-config-hn99k"] Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.472146 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-tnsft-config-hn99k"] Nov 28 15:40:39 crc kubenswrapper[4884]: I1128 15:40:39.564902 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-m6njq"] Nov 28 15:40:39 crc kubenswrapper[4884]: W1128 15:40:39.573974 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54b9e833_6baa_48ca_9b62_5b288f49c020.slice/crio-c10e47f41d521085a7680a41d93052519c4655e67371bb966602599183df25bb WatchSource:0}: Error finding container c10e47f41d521085a7680a41d93052519c4655e67371bb966602599183df25bb: Status 404 returned error can't find the container with id c10e47f41d521085a7680a41d93052519c4655e67371bb966602599183df25bb Nov 28 15:40:40 crc kubenswrapper[4884]: I1128 15:40:40.071523 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m6njq" event={"ID":"54b9e833-6baa-48ca-9b62-5b288f49c020","Type":"ContainerStarted","Data":"c10e47f41d521085a7680a41d93052519c4655e67371bb966602599183df25bb"} Nov 28 15:40:40 crc kubenswrapper[4884]: I1128 15:40:40.074279 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerID="1f257c9ad37cf0a9ca938f7cf1d6a36a190fd223c5c58821c4e590d39987fda9" exitCode=0 Nov 28 15:40:40 crc kubenswrapper[4884]: I1128 15:40:40.074327 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"454fa1ac-19ca-4c44-b0fb-2c30039524a7","Type":"ContainerDied","Data":"1f257c9ad37cf0a9ca938f7cf1d6a36a190fd223c5c58821c4e590d39987fda9"} Nov 28 15:40:40 crc kubenswrapper[4884]: I1128 15:40:40.700322 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6b5b439-e458-4a0f-89eb-e3fef48d5639" path="/var/lib/kubelet/pods/d6b5b439-e458-4a0f-89eb-e3fef48d5639/volumes" Nov 28 15:40:41 crc kubenswrapper[4884]: I1128 15:40:41.085641 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"454fa1ac-19ca-4c44-b0fb-2c30039524a7","Type":"ContainerStarted","Data":"5cea516e9ac81f0a76972ae4e518242f21039bb650a8fa7f56df976c1aec2cbc"} Nov 28 15:40:41 crc kubenswrapper[4884]: I1128 15:40:41.085934 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:40:41 crc kubenswrapper[4884]: I1128 15:40:41.121056 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371963.733744 podStartE2EDuration="1m13.121032017s" podCreationTimestamp="2025-11-28 15:39:28 +0000 UTC" firstStartedPulling="2025-11-28 15:39:30.443443781 +0000 UTC m=+1210.006227582" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:40:41.113455432 +0000 UTC m=+1280.676239323" watchObservedRunningTime="2025-11-28 15:40:41.121032017 +0000 UTC m=+1280.683815818" Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.793801 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-482c-account-create-cmhd8"] Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.804384 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-482c-account-create-cmhd8"] Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.804469 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.806967 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.845490 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5q56\" (UniqueName: \"kubernetes.io/projected/0e723bad-9c0e-48e4-a08e-b95c84e15b81-kube-api-access-x5q56\") pod \"keystone-482c-account-create-cmhd8\" (UID: \"0e723bad-9c0e-48e4-a08e-b95c84e15b81\") " pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.947267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5q56\" (UniqueName: \"kubernetes.io/projected/0e723bad-9c0e-48e4-a08e-b95c84e15b81-kube-api-access-x5q56\") pod \"keystone-482c-account-create-cmhd8\" (UID: \"0e723bad-9c0e-48e4-a08e-b95c84e15b81\") " pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:42 crc kubenswrapper[4884]: I1128 15:40:42.971135 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5q56\" (UniqueName: \"kubernetes.io/projected/0e723bad-9c0e-48e4-a08e-b95c84e15b81-kube-api-access-x5q56\") pod \"keystone-482c-account-create-cmhd8\" (UID: \"0e723bad-9c0e-48e4-a08e-b95c84e15b81\") " pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.110529 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-eaff-account-create-pqqn6"] Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.111765 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.115925 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.120142 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-eaff-account-create-pqqn6"] Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.130713 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.150373 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnlsz\" (UniqueName: \"kubernetes.io/projected/7f194cf7-2f65-4dde-842a-7b93fb8148b9-kube-api-access-gnlsz\") pod \"placement-eaff-account-create-pqqn6\" (UID: \"7f194cf7-2f65-4dde-842a-7b93fb8148b9\") " pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.290595 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnlsz\" (UniqueName: \"kubernetes.io/projected/7f194cf7-2f65-4dde-842a-7b93fb8148b9-kube-api-access-gnlsz\") pod \"placement-eaff-account-create-pqqn6\" (UID: \"7f194cf7-2f65-4dde-842a-7b93fb8148b9\") " pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.316429 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnlsz\" (UniqueName: \"kubernetes.io/projected/7f194cf7-2f65-4dde-842a-7b93fb8148b9-kube-api-access-gnlsz\") pod \"placement-eaff-account-create-pqqn6\" (UID: \"7f194cf7-2f65-4dde-842a-7b93fb8148b9\") " pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.427342 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.637227 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-482c-account-create-cmhd8"] Nov 28 15:40:43 crc kubenswrapper[4884]: I1128 15:40:43.923975 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-eaff-account-create-pqqn6"] Nov 28 15:40:44 crc kubenswrapper[4884]: I1128 15:40:44.118713 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eaff-account-create-pqqn6" event={"ID":"7f194cf7-2f65-4dde-842a-7b93fb8148b9","Type":"ContainerStarted","Data":"714246540c84e5a570c16ca516b2a8d1a8c0efec3554ef3add1e567c819f5903"} Nov 28 15:40:44 crc kubenswrapper[4884]: I1128 15:40:44.120104 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e723bad-9c0e-48e4-a08e-b95c84e15b81" containerID="c31ec74fdd2fed531250dfc9d5fc9e3011db90e8ed2175936a67f679bcb55eb2" exitCode=0 Nov 28 15:40:44 crc kubenswrapper[4884]: I1128 15:40:44.120134 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-482c-account-create-cmhd8" event={"ID":"0e723bad-9c0e-48e4-a08e-b95c84e15b81","Type":"ContainerDied","Data":"c31ec74fdd2fed531250dfc9d5fc9e3011db90e8ed2175936a67f679bcb55eb2"} Nov 28 15:40:44 crc kubenswrapper[4884]: I1128 15:40:44.120148 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-482c-account-create-cmhd8" event={"ID":"0e723bad-9c0e-48e4-a08e-b95c84e15b81","Type":"ContainerStarted","Data":"9c3b3ccdfce883319182834b954277d7bad51768059bd1436dc35fbb0cc527c0"} Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.134563 4884 generic.go:334] "Generic (PLEG): container finished" podID="7f194cf7-2f65-4dde-842a-7b93fb8148b9" containerID="4e35f8f692380c8cb19ad1be2fbac1bc3ab5001506118e1d8fa4d56ebc61177a" exitCode=0 Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.135414 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eaff-account-create-pqqn6" 
event={"ID":"7f194cf7-2f65-4dde-842a-7b93fb8148b9","Type":"ContainerDied","Data":"4e35f8f692380c8cb19ad1be2fbac1bc3ab5001506118e1d8fa4d56ebc61177a"} Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.454620 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.527971 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5q56\" (UniqueName: \"kubernetes.io/projected/0e723bad-9c0e-48e4-a08e-b95c84e15b81-kube-api-access-x5q56\") pod \"0e723bad-9c0e-48e4-a08e-b95c84e15b81\" (UID: \"0e723bad-9c0e-48e4-a08e-b95c84e15b81\") " Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.540535 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e723bad-9c0e-48e4-a08e-b95c84e15b81-kube-api-access-x5q56" (OuterVolumeSpecName: "kube-api-access-x5q56") pod "0e723bad-9c0e-48e4-a08e-b95c84e15b81" (UID: "0e723bad-9c0e-48e4-a08e-b95c84e15b81"). InnerVolumeSpecName "kube-api-access-x5q56". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.562867 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.615993 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-k2vzb"] Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.616936 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="dnsmasq-dns" containerID="cri-o://589b4f6507cc132b62d1fae62d2f303af7a7d6c6e68b173e7fe0518fce07931f" gracePeriod=10 Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.630116 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5q56\" (UniqueName: \"kubernetes.io/projected/0e723bad-9c0e-48e4-a08e-b95c84e15b81-kube-api-access-x5q56\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:45 crc kubenswrapper[4884]: I1128 15:40:45.739344 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: connect: connection refused" Nov 28 15:40:46 crc kubenswrapper[4884]: I1128 15:40:46.146710 4884 generic.go:334] "Generic (PLEG): container finished" podID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerID="589b4f6507cc132b62d1fae62d2f303af7a7d6c6e68b173e7fe0518fce07931f" exitCode=0 Nov 28 15:40:46 crc kubenswrapper[4884]: I1128 15:40:46.146779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" event={"ID":"19503544-c3b2-4d1f-9abb-8af9baac851a","Type":"ContainerDied","Data":"589b4f6507cc132b62d1fae62d2f303af7a7d6c6e68b173e7fe0518fce07931f"} Nov 28 15:40:46 crc kubenswrapper[4884]: I1128 15:40:46.151883 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-482c-account-create-cmhd8" Nov 28 15:40:46 crc kubenswrapper[4884]: I1128 15:40:46.152199 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-482c-account-create-cmhd8" event={"ID":"0e723bad-9c0e-48e4-a08e-b95c84e15b81","Type":"ContainerDied","Data":"9c3b3ccdfce883319182834b954277d7bad51768059bd1436dc35fbb0cc527c0"} Nov 28 15:40:46 crc kubenswrapper[4884]: I1128 15:40:46.152235 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c3b3ccdfce883319182834b954277d7bad51768059bd1436dc35fbb0cc527c0" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.219242 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.542941 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-tghnk"] Nov 28 15:40:49 crc kubenswrapper[4884]: E1128 15:40:49.543325 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e723bad-9c0e-48e4-a08e-b95c84e15b81" containerName="mariadb-account-create" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.543343 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e723bad-9c0e-48e4-a08e-b95c84e15b81" containerName="mariadb-account-create" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.543521 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e723bad-9c0e-48e4-a08e-b95c84e15b81" containerName="mariadb-account-create" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.544040 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-tghnk" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.568950 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-tghnk"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.596560 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsvpd\" (UniqueName: \"kubernetes.io/projected/33d7bf6d-b03e-493b-9e3f-6dcb2b223681-kube-api-access-wsvpd\") pod \"cinder-db-create-tghnk\" (UID: \"33d7bf6d-b03e-493b-9e3f-6dcb2b223681\") " pod="openstack/cinder-db-create-tghnk" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.648169 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-4qkr7"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.654129 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4qkr7" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.657825 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-4qkr7"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.697879 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsvpd\" (UniqueName: \"kubernetes.io/projected/33d7bf6d-b03e-493b-9e3f-6dcb2b223681-kube-api-access-wsvpd\") pod \"cinder-db-create-tghnk\" (UID: \"33d7bf6d-b03e-493b-9e3f-6dcb2b223681\") " pod="openstack/cinder-db-create-tghnk" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.697941 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvt2s\" (UniqueName: \"kubernetes.io/projected/20e1b396-c515-4176-9402-72f570001c08-kube-api-access-vvt2s\") pod \"barbican-db-create-4qkr7\" (UID: \"20e1b396-c515-4176-9402-72f570001c08\") " pod="openstack/barbican-db-create-4qkr7" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.714943 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsvpd\" (UniqueName: \"kubernetes.io/projected/33d7bf6d-b03e-493b-9e3f-6dcb2b223681-kube-api-access-wsvpd\") pod \"cinder-db-create-tghnk\" (UID: \"33d7bf6d-b03e-493b-9e3f-6dcb2b223681\") " pod="openstack/cinder-db-create-tghnk" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.767677 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-npbhj"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.768687 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npbhj" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.782920 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-npbhj"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.802839 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvt2s\" (UniqueName: \"kubernetes.io/projected/20e1b396-c515-4176-9402-72f570001c08-kube-api-access-vvt2s\") pod \"barbican-db-create-4qkr7\" (UID: \"20e1b396-c515-4176-9402-72f570001c08\") " pod="openstack/barbican-db-create-4qkr7" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.829128 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvt2s\" (UniqueName: \"kubernetes.io/projected/20e1b396-c515-4176-9402-72f570001c08-kube-api-access-vvt2s\") pod \"barbican-db-create-4qkr7\" (UID: \"20e1b396-c515-4176-9402-72f570001c08\") " pod="openstack/barbican-db-create-4qkr7" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.830578 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-tj565"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.831668 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.839668 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tj565"] Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.844552 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.844613 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pjtcl" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.844786 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.844858 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.860559 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-tghnk" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.904392 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-combined-ca-bundle\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.904479 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7fwv\" (UniqueName: \"kubernetes.io/projected/727e8f17-29bf-4c2f-b91b-f26b036f86f4-kube-api-access-q7fwv\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.904501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6z85s\" (UniqueName: \"kubernetes.io/projected/4326f7b3-d838-49f3-b54f-574a55b44de4-kube-api-access-6z85s\") pod \"neutron-db-create-npbhj\" (UID: \"4326f7b3-d838-49f3-b54f-574a55b44de4\") " pod="openstack/neutron-db-create-npbhj" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.904541 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-config-data\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:49 crc kubenswrapper[4884]: I1128 15:40:49.967537 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4qkr7" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.006035 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-combined-ca-bundle\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.006141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7fwv\" (UniqueName: \"kubernetes.io/projected/727e8f17-29bf-4c2f-b91b-f26b036f86f4-kube-api-access-q7fwv\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.006164 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6z85s\" (UniqueName: \"kubernetes.io/projected/4326f7b3-d838-49f3-b54f-574a55b44de4-kube-api-access-6z85s\") pod \"neutron-db-create-npbhj\" (UID: \"4326f7b3-d838-49f3-b54f-574a55b44de4\") " pod="openstack/neutron-db-create-npbhj" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.006203 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-config-data\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.009878 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-config-data\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.009942 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-combined-ca-bundle\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.020057 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7fwv\" (UniqueName: \"kubernetes.io/projected/727e8f17-29bf-4c2f-b91b-f26b036f86f4-kube-api-access-q7fwv\") pod \"keystone-db-sync-tj565\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.025567 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z85s\" (UniqueName: \"kubernetes.io/projected/4326f7b3-d838-49f3-b54f-574a55b44de4-kube-api-access-6z85s\") pod \"neutron-db-create-npbhj\" (UID: \"4326f7b3-d838-49f3-b54f-574a55b44de4\") " pod="openstack/neutron-db-create-npbhj" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.095880 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npbhj" Nov 28 15:40:50 crc kubenswrapper[4884]: I1128 15:40:50.173314 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tj565" Nov 28 15:40:54 crc kubenswrapper[4884]: I1128 15:40:54.974450 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:54 crc kubenswrapper[4884]: I1128 15:40:54.987136 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.113719 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmg79\" (UniqueName: \"kubernetes.io/projected/19503544-c3b2-4d1f-9abb-8af9baac851a-kube-api-access-zmg79\") pod \"19503544-c3b2-4d1f-9abb-8af9baac851a\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.113765 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnlsz\" (UniqueName: \"kubernetes.io/projected/7f194cf7-2f65-4dde-842a-7b93fb8148b9-kube-api-access-gnlsz\") pod \"7f194cf7-2f65-4dde-842a-7b93fb8148b9\" (UID: \"7f194cf7-2f65-4dde-842a-7b93fb8148b9\") " Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.113837 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-sb\") pod \"19503544-c3b2-4d1f-9abb-8af9baac851a\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.113885 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-dns-svc\") pod \"19503544-c3b2-4d1f-9abb-8af9baac851a\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.113938 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-config\") pod \"19503544-c3b2-4d1f-9abb-8af9baac851a\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.113965 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-nb\") pod \"19503544-c3b2-4d1f-9abb-8af9baac851a\" (UID: \"19503544-c3b2-4d1f-9abb-8af9baac851a\") " Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.119129 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f194cf7-2f65-4dde-842a-7b93fb8148b9-kube-api-access-gnlsz" (OuterVolumeSpecName: "kube-api-access-gnlsz") pod "7f194cf7-2f65-4dde-842a-7b93fb8148b9" (UID: "7f194cf7-2f65-4dde-842a-7b93fb8148b9"). InnerVolumeSpecName "kube-api-access-gnlsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.119775 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19503544-c3b2-4d1f-9abb-8af9baac851a-kube-api-access-zmg79" (OuterVolumeSpecName: "kube-api-access-zmg79") pod "19503544-c3b2-4d1f-9abb-8af9baac851a" (UID: "19503544-c3b2-4d1f-9abb-8af9baac851a"). InnerVolumeSpecName "kube-api-access-zmg79". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.165228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-config" (OuterVolumeSpecName: "config") pod "19503544-c3b2-4d1f-9abb-8af9baac851a" (UID: "19503544-c3b2-4d1f-9abb-8af9baac851a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.174827 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "19503544-c3b2-4d1f-9abb-8af9baac851a" (UID: "19503544-c3b2-4d1f-9abb-8af9baac851a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.183176 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "19503544-c3b2-4d1f-9abb-8af9baac851a" (UID: "19503544-c3b2-4d1f-9abb-8af9baac851a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.185999 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "19503544-c3b2-4d1f-9abb-8af9baac851a" (UID: "19503544-c3b2-4d1f-9abb-8af9baac851a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.216271 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.216311 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.216326 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.216341 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmg79\" (UniqueName: \"kubernetes.io/projected/19503544-c3b2-4d1f-9abb-8af9baac851a-kube-api-access-zmg79\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.216353 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnlsz\" (UniqueName: \"kubernetes.io/projected/7f194cf7-2f65-4dde-842a-7b93fb8148b9-kube-api-access-gnlsz\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.216365 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/19503544-c3b2-4d1f-9abb-8af9baac851a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.265303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-eaff-account-create-pqqn6" 
event={"ID":"7f194cf7-2f65-4dde-842a-7b93fb8148b9","Type":"ContainerDied","Data":"714246540c84e5a570c16ca516b2a8d1a8c0efec3554ef3add1e567c819f5903"} Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.265340 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="714246540c84e5a570c16ca516b2a8d1a8c0efec3554ef3add1e567c819f5903" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.265412 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-eaff-account-create-pqqn6" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.270336 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" event={"ID":"19503544-c3b2-4d1f-9abb-8af9baac851a","Type":"ContainerDied","Data":"78cd6d3854d009813ddfa246696c33fc4e2f12f6634d79681cb575a27de594c2"} Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.270383 4884 scope.go:117] "RemoveContainer" containerID="589b4f6507cc132b62d1fae62d2f303af7a7d6c6e68b173e7fe0518fce07931f" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.270492 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.300448 4884 scope.go:117] "RemoveContainer" containerID="8055a67f5ddd9c2922f210bcf891a4421187fa7a8f688eebcb988437cb3c541d" Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.317450 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-k2vzb"] Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.328079 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-k2vzb"] Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.354849 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tj565"] Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.419248 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-tghnk"] Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.425647 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-npbhj"] Nov 28 15:40:55 crc kubenswrapper[4884]: W1128 15:40:55.445181 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4326f7b3_d838_49f3_b54f_574a55b44de4.slice/crio-2e5f585511d6362fa66b2c43eb84dbf8e2f37805752e390467dce07a576613d8 WatchSource:0}: Error finding container 2e5f585511d6362fa66b2c43eb84dbf8e2f37805752e390467dce07a576613d8: Status 404 returned error can't find the container with id 2e5f585511d6362fa66b2c43eb84dbf8e2f37805752e390467dce07a576613d8 Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.463262 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-4qkr7"] Nov 28 15:40:55 crc kubenswrapper[4884]: W1128 15:40:55.466135 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20e1b396_c515_4176_9402_72f570001c08.slice/crio-4b44c615f89daf82632ee4f2fc7abc22b2617bd9384e2f86ea0acaa545b9e0f0 WatchSource:0}: Error finding container 4b44c615f89daf82632ee4f2fc7abc22b2617bd9384e2f86ea0acaa545b9e0f0: Status 404 returned error can't find the container with id 4b44c615f89daf82632ee4f2fc7abc22b2617bd9384e2f86ea0acaa545b9e0f0 Nov 28 15:40:55 crc kubenswrapper[4884]: I1128 15:40:55.742352 4884 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-k2vzb" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.115:5353: i/o timeout" Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.289588 4884 generic.go:334] "Generic (PLEG): container finished" podID="20e1b396-c515-4176-9402-72f570001c08" containerID="e502b73487d2bb57b1e75ef0a8faf8c996a390aa1352bf957bb16adf8c8bc21b" exitCode=0 Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.290505 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4qkr7" event={"ID":"20e1b396-c515-4176-9402-72f570001c08","Type":"ContainerDied","Data":"e502b73487d2bb57b1e75ef0a8faf8c996a390aa1352bf957bb16adf8c8bc21b"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.290557 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4qkr7" event={"ID":"20e1b396-c515-4176-9402-72f570001c08","Type":"ContainerStarted","Data":"4b44c615f89daf82632ee4f2fc7abc22b2617bd9384e2f86ea0acaa545b9e0f0"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.293480 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-tghnk" event={"ID":"33d7bf6d-b03e-493b-9e3f-6dcb2b223681","Type":"ContainerDied","Data":"a90ee2ce33b2e09b2454ee0a10a11d83c9be70df7298ec7541b4a79227104da6"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.293431 4884 generic.go:334] "Generic (PLEG): container finished" podID="33d7bf6d-b03e-493b-9e3f-6dcb2b223681" containerID="a90ee2ce33b2e09b2454ee0a10a11d83c9be70df7298ec7541b4a79227104da6" exitCode=0 Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.293593 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-tghnk" event={"ID":"33d7bf6d-b03e-493b-9e3f-6dcb2b223681","Type":"ContainerStarted","Data":"81e4ff91a2dc65afbe146ae3c93f9a7010f5f4b7e7852662ab09a6d27c967922"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.297909 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m6njq" event={"ID":"54b9e833-6baa-48ca-9b62-5b288f49c020","Type":"ContainerStarted","Data":"0ead29ea69f9cb39624ccf2cdc69491b89fbdb757e4248352d5579bf98013460"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.301144 4884 generic.go:334] "Generic (PLEG): container finished" podID="4326f7b3-d838-49f3-b54f-574a55b44de4" containerID="6af27ca64073db0df88f0ae368c4d12f0fe58f42eb05597de6f96faf030eabdc" exitCode=0 Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.301203 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-npbhj" event={"ID":"4326f7b3-d838-49f3-b54f-574a55b44de4","Type":"ContainerDied","Data":"6af27ca64073db0df88f0ae368c4d12f0fe58f42eb05597de6f96faf030eabdc"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.301228 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-npbhj" event={"ID":"4326f7b3-d838-49f3-b54f-574a55b44de4","Type":"ContainerStarted","Data":"2e5f585511d6362fa66b2c43eb84dbf8e2f37805752e390467dce07a576613d8"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.302379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tj565" event={"ID":"727e8f17-29bf-4c2f-b91b-f26b036f86f4","Type":"ContainerStarted","Data":"a368c7d988a385faeb083886d7393c8e9a8b4d6bd8c1758d80624529c49dd252"} Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.376279 4884 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-m6njq" podStartSLOduration=3.051526662 podStartE2EDuration="18.376257647s" podCreationTimestamp="2025-11-28 15:40:38 +0000 UTC" firstStartedPulling="2025-11-28 15:40:39.580385539 +0000 UTC m=+1279.143169340" lastFinishedPulling="2025-11-28 15:40:54.905116524 +0000 UTC m=+1294.467900325" observedRunningTime="2025-11-28 15:40:56.366297835 +0000 UTC m=+1295.929081656" watchObservedRunningTime="2025-11-28 15:40:56.376257647 +0000 UTC m=+1295.939041448" Nov 28 15:40:56 crc kubenswrapper[4884]: I1128 15:40:56.706879 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" path="/var/lib/kubelet/pods/19503544-c3b2-4d1f-9abb-8af9baac851a/volumes" Nov 28 15:40:59 crc kubenswrapper[4884]: I1128 15:40:59.914295 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.396585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-npbhj" event={"ID":"4326f7b3-d838-49f3-b54f-574a55b44de4","Type":"ContainerDied","Data":"2e5f585511d6362fa66b2c43eb84dbf8e2f37805752e390467dce07a576613d8"} Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.397066 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e5f585511d6362fa66b2c43eb84dbf8e2f37805752e390467dce07a576613d8" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.400917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-4qkr7" event={"ID":"20e1b396-c515-4176-9402-72f570001c08","Type":"ContainerDied","Data":"4b44c615f89daf82632ee4f2fc7abc22b2617bd9384e2f86ea0acaa545b9e0f0"} Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.400969 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b44c615f89daf82632ee4f2fc7abc22b2617bd9384e2f86ea0acaa545b9e0f0" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.402876 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-tghnk" event={"ID":"33d7bf6d-b03e-493b-9e3f-6dcb2b223681","Type":"ContainerDied","Data":"81e4ff91a2dc65afbe146ae3c93f9a7010f5f4b7e7852662ab09a6d27c967922"} Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.402922 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81e4ff91a2dc65afbe146ae3c93f9a7010f5f4b7e7852662ab09a6d27c967922" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.487318 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-tghnk" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.491420 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npbhj" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.499747 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-4qkr7" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.595261 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvt2s\" (UniqueName: \"kubernetes.io/projected/20e1b396-c515-4176-9402-72f570001c08-kube-api-access-vvt2s\") pod \"20e1b396-c515-4176-9402-72f570001c08\" (UID: \"20e1b396-c515-4176-9402-72f570001c08\") " Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.595640 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6z85s\" (UniqueName: \"kubernetes.io/projected/4326f7b3-d838-49f3-b54f-574a55b44de4-kube-api-access-6z85s\") pod \"4326f7b3-d838-49f3-b54f-574a55b44de4\" (UID: \"4326f7b3-d838-49f3-b54f-574a55b44de4\") " Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.595699 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsvpd\" (UniqueName: \"kubernetes.io/projected/33d7bf6d-b03e-493b-9e3f-6dcb2b223681-kube-api-access-wsvpd\") pod \"33d7bf6d-b03e-493b-9e3f-6dcb2b223681\" (UID: \"33d7bf6d-b03e-493b-9e3f-6dcb2b223681\") " Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.599911 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20e1b396-c515-4176-9402-72f570001c08-kube-api-access-vvt2s" (OuterVolumeSpecName: "kube-api-access-vvt2s") pod "20e1b396-c515-4176-9402-72f570001c08" (UID: "20e1b396-c515-4176-9402-72f570001c08"). InnerVolumeSpecName "kube-api-access-vvt2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.601406 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4326f7b3-d838-49f3-b54f-574a55b44de4-kube-api-access-6z85s" (OuterVolumeSpecName: "kube-api-access-6z85s") pod "4326f7b3-d838-49f3-b54f-574a55b44de4" (UID: "4326f7b3-d838-49f3-b54f-574a55b44de4"). InnerVolumeSpecName "kube-api-access-6z85s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.602125 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33d7bf6d-b03e-493b-9e3f-6dcb2b223681-kube-api-access-wsvpd" (OuterVolumeSpecName: "kube-api-access-wsvpd") pod "33d7bf6d-b03e-493b-9e3f-6dcb2b223681" (UID: "33d7bf6d-b03e-493b-9e3f-6dcb2b223681"). InnerVolumeSpecName "kube-api-access-wsvpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.698744 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsvpd\" (UniqueName: \"kubernetes.io/projected/33d7bf6d-b03e-493b-9e3f-6dcb2b223681-kube-api-access-wsvpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.699009 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvt2s\" (UniqueName: \"kubernetes.io/projected/20e1b396-c515-4176-9402-72f570001c08-kube-api-access-vvt2s\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:01 crc kubenswrapper[4884]: I1128 15:41:01.699150 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6z85s\" (UniqueName: \"kubernetes.io/projected/4326f7b3-d838-49f3-b54f-574a55b44de4-kube-api-access-6z85s\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:02 crc kubenswrapper[4884]: I1128 15:41:02.412434 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-tghnk" Nov 28 15:41:02 crc kubenswrapper[4884]: I1128 15:41:02.412438 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tj565" event={"ID":"727e8f17-29bf-4c2f-b91b-f26b036f86f4","Type":"ContainerStarted","Data":"d6114c43021b4cdc8ee40a171d9cdb567ac02848a9ade6dd09f970c6a21fc709"} Nov 28 15:41:02 crc kubenswrapper[4884]: I1128 15:41:02.412686 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-4qkr7" Nov 28 15:41:02 crc kubenswrapper[4884]: I1128 15:41:02.413520 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-npbhj" Nov 28 15:41:02 crc kubenswrapper[4884]: I1128 15:41:02.437842 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-tj565" podStartSLOduration=7.459911597 podStartE2EDuration="13.437826646s" podCreationTimestamp="2025-11-28 15:40:49 +0000 UTC" firstStartedPulling="2025-11-28 15:40:55.359024889 +0000 UTC m=+1294.921808710" lastFinishedPulling="2025-11-28 15:41:01.336939958 +0000 UTC m=+1300.899723759" observedRunningTime="2025-11-28 15:41:02.434265169 +0000 UTC m=+1301.997048980" watchObservedRunningTime="2025-11-28 15:41:02.437826646 +0000 UTC m=+1302.000610437" Nov 28 15:41:03 crc kubenswrapper[4884]: I1128 15:41:03.423813 4884 generic.go:334] "Generic (PLEG): container finished" podID="54b9e833-6baa-48ca-9b62-5b288f49c020" containerID="0ead29ea69f9cb39624ccf2cdc69491b89fbdb757e4248352d5579bf98013460" exitCode=0 Nov 28 15:41:03 crc kubenswrapper[4884]: I1128 15:41:03.425930 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m6njq" event={"ID":"54b9e833-6baa-48ca-9b62-5b288f49c020","Type":"ContainerDied","Data":"0ead29ea69f9cb39624ccf2cdc69491b89fbdb757e4248352d5579bf98013460"} Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.434706 4884 generic.go:334] "Generic (PLEG): container finished" podID="727e8f17-29bf-4c2f-b91b-f26b036f86f4" containerID="d6114c43021b4cdc8ee40a171d9cdb567ac02848a9ade6dd09f970c6a21fc709" exitCode=0 Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.434816 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tj565" event={"ID":"727e8f17-29bf-4c2f-b91b-f26b036f86f4","Type":"ContainerDied","Data":"d6114c43021b4cdc8ee40a171d9cdb567ac02848a9ade6dd09f970c6a21fc709"} Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.845809 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-m6njq" Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.955327 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-db-sync-config-data\") pod \"54b9e833-6baa-48ca-9b62-5b288f49c020\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.955707 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-config-data\") pod \"54b9e833-6baa-48ca-9b62-5b288f49c020\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.955777 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc2gz\" (UniqueName: \"kubernetes.io/projected/54b9e833-6baa-48ca-9b62-5b288f49c020-kube-api-access-jc2gz\") pod \"54b9e833-6baa-48ca-9b62-5b288f49c020\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.955907 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-combined-ca-bundle\") pod \"54b9e833-6baa-48ca-9b62-5b288f49c020\" (UID: \"54b9e833-6baa-48ca-9b62-5b288f49c020\") " Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.967877 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "54b9e833-6baa-48ca-9b62-5b288f49c020" (UID: "54b9e833-6baa-48ca-9b62-5b288f49c020"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.968036 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54b9e833-6baa-48ca-9b62-5b288f49c020-kube-api-access-jc2gz" (OuterVolumeSpecName: "kube-api-access-jc2gz") pod "54b9e833-6baa-48ca-9b62-5b288f49c020" (UID: "54b9e833-6baa-48ca-9b62-5b288f49c020"). InnerVolumeSpecName "kube-api-access-jc2gz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:04 crc kubenswrapper[4884]: I1128 15:41:04.990457 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54b9e833-6baa-48ca-9b62-5b288f49c020" (UID: "54b9e833-6baa-48ca-9b62-5b288f49c020"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.009798 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-config-data" (OuterVolumeSpecName: "config-data") pod "54b9e833-6baa-48ca-9b62-5b288f49c020" (UID: "54b9e833-6baa-48ca-9b62-5b288f49c020"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.057837 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc2gz\" (UniqueName: \"kubernetes.io/projected/54b9e833-6baa-48ca-9b62-5b288f49c020-kube-api-access-jc2gz\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.057875 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.057884 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.057893 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b9e833-6baa-48ca-9b62-5b288f49c020-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.444978 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m6njq" event={"ID":"54b9e833-6baa-48ca-9b62-5b288f49c020","Type":"ContainerDied","Data":"c10e47f41d521085a7680a41d93052519c4655e67371bb966602599183df25bb"} Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.445371 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c10e47f41d521085a7680a41d93052519c4655e67371bb966602599183df25bb" Nov 28 15:41:05 crc kubenswrapper[4884]: I1128 15:41:05.445053 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m6njq" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.001014 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tj565" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094029 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-5rjxb"] Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094447 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="dnsmasq-dns" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094472 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="dnsmasq-dns" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094497 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="init" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094505 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="init" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094532 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f194cf7-2f65-4dde-842a-7b93fb8148b9" containerName="mariadb-account-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094541 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f194cf7-2f65-4dde-842a-7b93fb8148b9" containerName="mariadb-account-create" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094554 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33d7bf6d-b03e-493b-9e3f-6dcb2b223681" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094562 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="33d7bf6d-b03e-493b-9e3f-6dcb2b223681" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094578 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="727e8f17-29bf-4c2f-b91b-f26b036f86f4" containerName="keystone-db-sync" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094587 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="727e8f17-29bf-4c2f-b91b-f26b036f86f4" containerName="keystone-db-sync" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094600 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b9e833-6baa-48ca-9b62-5b288f49c020" containerName="glance-db-sync" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094607 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b9e833-6baa-48ca-9b62-5b288f49c020" containerName="glance-db-sync" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094619 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e1b396-c515-4176-9402-72f570001c08" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094627 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e1b396-c515-4176-9402-72f570001c08" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: E1128 15:41:06.094637 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4326f7b3-d838-49f3-b54f-574a55b44de4" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094646 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4326f7b3-d838-49f3-b54f-574a55b44de4" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094823 4884 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="20e1b396-c515-4176-9402-72f570001c08" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094839 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="33d7bf6d-b03e-493b-9e3f-6dcb2b223681" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094850 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b9e833-6baa-48ca-9b62-5b288f49c020" containerName="glance-db-sync" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094860 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="727e8f17-29bf-4c2f-b91b-f26b036f86f4" containerName="keystone-db-sync" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094876 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="19503544-c3b2-4d1f-9abb-8af9baac851a" containerName="dnsmasq-dns" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094889 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4326f7b3-d838-49f3-b54f-574a55b44de4" containerName="mariadb-database-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.094898 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f194cf7-2f65-4dde-842a-7b93fb8148b9" containerName="mariadb-account-create" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.103330 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.113725 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-5rjxb"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.123709 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-combined-ca-bundle\") pod \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.123763 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-config-data\") pod \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.123793 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7fwv\" (UniqueName: \"kubernetes.io/projected/727e8f17-29bf-4c2f-b91b-f26b036f86f4-kube-api-access-q7fwv\") pod \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\" (UID: \"727e8f17-29bf-4c2f-b91b-f26b036f86f4\") " Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.123976 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.124021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " 
pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.124046 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.124073 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-svc\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.124136 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs576\" (UniqueName: \"kubernetes.io/projected/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-kube-api-access-vs576\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.124167 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-config\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.137381 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/727e8f17-29bf-4c2f-b91b-f26b036f86f4-kube-api-access-q7fwv" (OuterVolumeSpecName: "kube-api-access-q7fwv") pod "727e8f17-29bf-4c2f-b91b-f26b036f86f4" (UID: "727e8f17-29bf-4c2f-b91b-f26b036f86f4"). InnerVolumeSpecName "kube-api-access-q7fwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.191302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "727e8f17-29bf-4c2f-b91b-f26b036f86f4" (UID: "727e8f17-29bf-4c2f-b91b-f26b036f86f4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225327 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225357 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-svc\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225391 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs576\" (UniqueName: \"kubernetes.io/projected/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-kube-api-access-vs576\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225423 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-config\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225584 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.225607 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7fwv\" (UniqueName: \"kubernetes.io/projected/727e8f17-29bf-4c2f-b91b-f26b036f86f4-kube-api-access-q7fwv\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.227335 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-svc\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.228082 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-sb\") pod 
\"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.229080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.229349 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.230011 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-config\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.249158 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-config-data" (OuterVolumeSpecName: "config-data") pod "727e8f17-29bf-4c2f-b91b-f26b036f86f4" (UID: "727e8f17-29bf-4c2f-b91b-f26b036f86f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.250256 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs576\" (UniqueName: \"kubernetes.io/projected/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-kube-api-access-vs576\") pod \"dnsmasq-dns-895cf5cf-5rjxb\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.327056 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/727e8f17-29bf-4c2f-b91b-f26b036f86f4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.425563 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.456321 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tj565" event={"ID":"727e8f17-29bf-4c2f-b91b-f26b036f86f4","Type":"ContainerDied","Data":"a368c7d988a385faeb083886d7393c8e9a8b4d6bd8c1758d80624529c49dd252"} Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.456358 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a368c7d988a385faeb083886d7393c8e9a8b4d6bd8c1758d80624529c49dd252" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.456377 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tj565" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.608143 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-5rjxb"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.648245 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-n98cf"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.649812 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.666737 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-n98cf"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.679171 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-rslcd"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.680266 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.682644 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.682801 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pjtcl" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.682903 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.683062 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.749373 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-config-data\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.749501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.754773 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-rslcd"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755241 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-credential-keys\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755268 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755302 
4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-combined-ca-bundle\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755340 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755380 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-fernet-keys\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-scripts\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755468 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-config\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755498 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jmwb\" (UniqueName: \"kubernetes.io/projected/bd79eeaf-5466-462c-a066-4386050eab44-kube-api-access-6jmwb\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755534 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.755685 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw98q\" (UniqueName: \"kubernetes.io/projected/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-kube-api-access-zw98q\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860094 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-credential-keys\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 
15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860172 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860204 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-combined-ca-bundle\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-fernet-keys\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-scripts\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860332 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-config\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860356 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jmwb\" (UniqueName: \"kubernetes.io/projected/bd79eeaf-5466-462c-a066-4386050eab44-kube-api-access-6jmwb\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860383 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860434 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw98q\" (UniqueName: \"kubernetes.io/projected/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-kube-api-access-zw98q\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860475 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-config-data\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.860530 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.861698 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.862018 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-config\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.862775 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.863654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.864392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.865756 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-credential-keys\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.866301 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-combined-ca-bundle\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.866981 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-scripts\") pod 
\"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.877365 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-config-data\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.881674 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-fernet-keys\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.888527 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw98q\" (UniqueName: \"kubernetes.io/projected/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-kube-api-access-zw98q\") pod \"dnsmasq-dns-6c9c9f998c-n98cf\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.888856 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jmwb\" (UniqueName: \"kubernetes.io/projected/bd79eeaf-5466-462c-a066-4386050eab44-kube-api-access-6jmwb\") pod \"keystone-bootstrap-rslcd\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.919179 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.921892 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.927344 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.927411 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.931282 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-n98cf"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.931864 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.937639 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.985937 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mcqh\" (UniqueName: \"kubernetes.io/projected/73b50da4-4218-4886-b687-1bcd1bd3c6b5-kube-api-access-2mcqh\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.985988 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-run-httpd\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.986011 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.986036 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-scripts\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.986074 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-config-data\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.986091 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:06 crc kubenswrapper[4884]: I1128 15:41:06.986163 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-log-httpd\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.022717 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-wp9nt"] Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.025224 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.065416 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.090652 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-wp9nt"] Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.091052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mcqh\" (UniqueName: \"kubernetes.io/projected/73b50da4-4218-4886-b687-1bcd1bd3c6b5-kube-api-access-2mcqh\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.093750 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-run-httpd\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.093830 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.093885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-scripts\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.093923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094010 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj6z9\" (UniqueName: \"kubernetes.io/projected/1a186df2-95e5-4f8c-9184-0eeef5af978c-kube-api-access-qj6z9\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094057 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-config-data\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094081 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094180 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " 
pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-log-httpd\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094401 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.094510 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-config\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.095763 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-run-httpd\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.102819 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-log-httpd\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.109370 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-scripts\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.115301 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.115950 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-config-data\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.116380 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.125213 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-sfxfp"] Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.126333 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.130310 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.130493 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.132473 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gg9hf" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.133530 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mcqh\" (UniqueName: \"kubernetes.io/projected/73b50da4-4218-4886-b687-1bcd1bd3c6b5-kube-api-access-2mcqh\") pod \"ceilometer-0\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " pod="openstack/ceilometer-0" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.170612 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-sfxfp"] Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.180397 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-5rjxb"] Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.197252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-scripts\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.197449 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-combined-ca-bundle\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.197475 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.197935 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-config\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.198021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-config-data\") pod \"placement-db-sync-sfxfp\" 
(UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.198073 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-logs\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.198776 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.199306 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-config\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.199540 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.199614 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj6z9\" (UniqueName: \"kubernetes.io/projected/1a186df2-95e5-4f8c-9184-0eeef5af978c-kube-api-access-qj6z9\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.199664 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.199684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.199704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcb26\" (UniqueName: \"kubernetes.io/projected/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-kube-api-access-vcb26\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.200615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " 
pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.201296 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:07 crc kubenswrapper[4884]: I1128 15:41:07.201403 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.232786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj6z9\" (UniqueName: \"kubernetes.io/projected/1a186df2-95e5-4f8c-9184-0eeef5af978c-kube-api-access-qj6z9\") pod \"dnsmasq-dns-57c957c4ff-wp9nt\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.303837 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcb26\" (UniqueName: \"kubernetes.io/projected/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-kube-api-access-vcb26\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.303889 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-scripts\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.303922 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-combined-ca-bundle\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.303987 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-config-data\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.304020 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-logs\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.304384 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-logs\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.306774 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-scripts\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.307081 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-config-data\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.308696 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-combined-ca-bundle\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.318752 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcb26\" (UniqueName: \"kubernetes.io/projected/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-kube-api-access-vcb26\") pod \"placement-db-sync-sfxfp\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.375577 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.382637 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.467179 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" event={"ID":"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3","Type":"ContainerStarted","Data":"4cbc76fb65c9a551beccf6074a91beba20ef4b472157488a1d38094da003904a"} Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.506761 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.753265 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.754882 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.758811 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmtm9" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.759199 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.761793 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.782722 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.815372 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.815644 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.815825 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.815886 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-logs\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.815922 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-config-data\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.815947 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-scripts\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.816008 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwfn4\" (UniqueName: \"kubernetes.io/projected/5d3f83f3-8d54-4c16-a768-614f531231ef-kube-api-access-zwfn4\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " 
pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.824987 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.826587 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.829842 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.836531 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918151 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-logs\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918200 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmbcz\" (UniqueName: \"kubernetes.io/projected/f82d7a01-e49c-47ae-862b-e4169002dd7c-kube-api-access-jmbcz\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-config-data\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-scripts\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918285 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918306 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwfn4\" (UniqueName: \"kubernetes.io/projected/5d3f83f3-8d54-4c16-a768-614f531231ef-kube-api-access-zwfn4\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918345 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918403 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-logs\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918457 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918495 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918522 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918688 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-logs\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.918919 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.919239 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.924576 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-scripts\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.925827 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-config-data\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.925976 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.939208 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwfn4\" (UniqueName: \"kubernetes.io/projected/5d3f83f3-8d54-4c16-a768-614f531231ef-kube-api-access-zwfn4\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:07.940618 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020359 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmbcz\" (UniqueName: \"kubernetes.io/projected/f82d7a01-e49c-47ae-862b-e4169002dd7c-kube-api-access-jmbcz\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020425 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020449 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020488 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020742 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020782 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020804 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-logs\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.020951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.023335 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.023756 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-logs\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.024588 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.028099 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.032962 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 
15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.036392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmbcz\" (UniqueName: \"kubernetes.io/projected/f82d7a01-e49c-47ae-862b-e4169002dd7c-kube-api-access-jmbcz\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.046999 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.095373 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.164249 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.548408 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" event={"ID":"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3","Type":"ContainerStarted","Data":"b0011ee35a3d9360333f4fd6d13ec534ea09e5d63b8319fb43f48c4fe5c944ef"} Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:08.548870 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" podUID="333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" containerName="init" containerID="cri-o://b0011ee35a3d9360333f4fd6d13ec534ea09e5d63b8319fb43f48c4fe5c944ef" gracePeriod=10 Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.280209 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/telemetry-operator-controller-manager-695797c565-zfmh2" podUID="53444326-ebe6-45f5-a086-63ef03d1533a" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.562219 4884 generic.go:334] "Generic (PLEG): container finished" podID="333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" containerID="b0011ee35a3d9360333f4fd6d13ec534ea09e5d63b8319fb43f48c4fe5c944ef" exitCode=0 Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.562276 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" event={"ID":"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3","Type":"ContainerDied","Data":"b0011ee35a3d9360333f4fd6d13ec534ea09e5d63b8319fb43f48c4fe5c944ef"} Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.708893 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-0078-account-create-z2ttf"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.710686 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.713744 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.724590 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0078-account-create-z2ttf"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.768276 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdx8l\" (UniqueName: \"kubernetes.io/projected/8a17a6e8-da33-4343-bc08-feb493224228-kube-api-access-cdx8l\") pod \"cinder-0078-account-create-z2ttf\" (UID: \"8a17a6e8-da33-4343-bc08-feb493224228\") " pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.818083 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-783f-account-create-6222f"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.819363 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.822535 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.845055 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-783f-account-create-6222f"] Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.869553 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdx8l\" (UniqueName: \"kubernetes.io/projected/8a17a6e8-da33-4343-bc08-feb493224228-kube-api-access-cdx8l\") pod \"cinder-0078-account-create-z2ttf\" (UID: \"8a17a6e8-da33-4343-bc08-feb493224228\") " pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.869597 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t582t\" (UniqueName: \"kubernetes.io/projected/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8-kube-api-access-t582t\") pod \"barbican-783f-account-create-6222f\" (UID: \"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8\") " pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.903039 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdx8l\" (UniqueName: \"kubernetes.io/projected/8a17a6e8-da33-4343-bc08-feb493224228-kube-api-access-cdx8l\") pod \"cinder-0078-account-create-z2ttf\" (UID: \"8a17a6e8-da33-4343-bc08-feb493224228\") " pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.946077 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-n98cf"] Nov 28 15:41:09 crc kubenswrapper[4884]: W1128 15:41:09.961896 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podafbd0552_ad38_45fa_ac2a_0bb789c3fa3f.slice/crio-763c0323bbcb538c5e573d393fcc1d48253847703ceb23b4eb91272b185ba40b WatchSource:0}: Error finding container 763c0323bbcb538c5e573d393fcc1d48253847703ceb23b4eb91272b185ba40b: Status 404 returned error can't find the container with id 763c0323bbcb538c5e573d393fcc1d48253847703ceb23b4eb91272b185ba40b Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.971515 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t582t\" (UniqueName: \"kubernetes.io/projected/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8-kube-api-access-t582t\") pod \"barbican-783f-account-create-6222f\" (UID: \"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8\") " pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:09 crc kubenswrapper[4884]: I1128 15:41:09.998357 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t582t\" (UniqueName: \"kubernetes.io/projected/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8-kube-api-access-t582t\") pod \"barbican-783f-account-create-6222f\" (UID: \"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8\") " pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.006244 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.023695 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-39ed-account-create-szh2k"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.031345 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.034206 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.038067 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.053280 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-39ed-account-create-szh2k"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.075019 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s4ml\" (UniqueName: \"kubernetes.io/projected/4b9d1dde-9e0b-47bb-8046-a5687c344d9b-kube-api-access-5s4ml\") pod \"neutron-39ed-account-create-szh2k\" (UID: \"4b9d1dde-9e0b-47bb-8046-a5687c344d9b\") " pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.106068 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.135080 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.145610 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.190371 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-config\") pod \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.190433 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-svc\") pod \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.190458 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-swift-storage-0\") pod \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.190490 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs576\" (UniqueName: \"kubernetes.io/projected/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-kube-api-access-vs576\") pod \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.190635 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-nb\") pod \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.190685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-sb\") pod \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\" (UID: \"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3\") " Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.191387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s4ml\" (UniqueName: \"kubernetes.io/projected/4b9d1dde-9e0b-47bb-8046-a5687c344d9b-kube-api-access-5s4ml\") pod \"neutron-39ed-account-create-szh2k\" (UID: \"4b9d1dde-9e0b-47bb-8046-a5687c344d9b\") " pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.203439 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-kube-api-access-vs576" (OuterVolumeSpecName: "kube-api-access-vs576") pod "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" (UID: "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3"). InnerVolumeSpecName "kube-api-access-vs576". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.246692 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s4ml\" (UniqueName: \"kubernetes.io/projected/4b9d1dde-9e0b-47bb-8046-a5687c344d9b-kube-api-access-5s4ml\") pod \"neutron-39ed-account-create-szh2k\" (UID: \"4b9d1dde-9e0b-47bb-8046-a5687c344d9b\") " pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.247315 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.263210 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.267039 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-config" (OuterVolumeSpecName: "config") pod "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" (UID: "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.275330 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" (UID: "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.276631 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" (UID: "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.294160 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.294186 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.294196 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.294205 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs576\" (UniqueName: \"kubernetes.io/projected/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-kube-api-access-vs576\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.301261 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" (UID: "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.323449 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" (UID: "333f56a4-9727-4a9d-a8a3-cbb9ffca80b3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.335436 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-rslcd"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.344114 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-sfxfp"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.355377 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-wp9nt"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.364029 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.395308 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.395339 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.430718 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.465590 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.589484 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d3f83f3-8d54-4c16-a768-614f531231ef","Type":"ContainerStarted","Data":"ab180b74066ea56b59971fbb2c09251ace27208b4e70aa13ffa30bd8526b7435"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.591276 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" event={"ID":"333f56a4-9727-4a9d-a8a3-cbb9ffca80b3","Type":"ContainerDied","Data":"4cbc76fb65c9a551beccf6074a91beba20ef4b472157488a1d38094da003904a"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.591283 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-5rjxb" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.591308 4884 scope.go:117] "RemoveContainer" containerID="b0011ee35a3d9360333f4fd6d13ec534ea09e5d63b8319fb43f48c4fe5c944ef" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.602256 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" event={"ID":"1a186df2-95e5-4f8c-9184-0eeef5af978c","Type":"ContainerStarted","Data":"f8a6af4d9cf9bd80bc19d3a278e668bdeda0a20140dd18918473fa561a1622dc"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.611955 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rslcd" event={"ID":"bd79eeaf-5466-462c-a066-4386050eab44","Type":"ContainerStarted","Data":"32b153b1fc85d3fe8910441f2c12c370af56edd711438703f1f5b161624d6e61"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.614547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f82d7a01-e49c-47ae-862b-e4169002dd7c","Type":"ContainerStarted","Data":"153c1951e5b367e5652202322670e3853f25bdb7a6102a7bdefe4553a33bdba8"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.617551 4884 generic.go:334] "Generic (PLEG): container finished" podID="afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" containerID="b20f8882c1ff63185d65b3b6fef7391be624679ba4dd6a3888e818a40242fd15" exitCode=0 Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.617595 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" event={"ID":"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f","Type":"ContainerDied","Data":"b20f8882c1ff63185d65b3b6fef7391be624679ba4dd6a3888e818a40242fd15"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.617612 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" event={"ID":"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f","Type":"ContainerStarted","Data":"763c0323bbcb538c5e573d393fcc1d48253847703ceb23b4eb91272b185ba40b"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.619631 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-sfxfp" event={"ID":"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c","Type":"ContainerStarted","Data":"fa8ccdab4ea562c4d9019211fb206a64659436666f31948e501016054106d782"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.621611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerStarted","Data":"1ca8f5336b4b72a678478b6c246fefa6295971b4eb7fc8c621e9914c69e9840d"} Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.633093 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0078-account-create-z2ttf"] Nov 28 15:41:10 crc kubenswrapper[4884]: W1128 15:41:10.655358 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a17a6e8_da33_4343_bc08_feb493224228.slice/crio-2e99aa2e38e2e013ab1dd9110fa3c1fadd8d66c343f48406ccf1772fbaa3d8cf WatchSource:0}: Error finding container 2e99aa2e38e2e013ab1dd9110fa3c1fadd8d66c343f48406ccf1772fbaa3d8cf: Status 404 returned error can't find the container with id 2e99aa2e38e2e013ab1dd9110fa3c1fadd8d66c343f48406ccf1772fbaa3d8cf Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.675535 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-5rjxb"] 
Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.679339 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-5rjxb"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.707429 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" path="/var/lib/kubelet/pods/333f56a4-9727-4a9d-a8a3-cbb9ffca80b3/volumes" Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.787889 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-39ed-account-create-szh2k"] Nov 28 15:41:10 crc kubenswrapper[4884]: I1128 15:41:10.797262 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-783f-account-create-6222f"] Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.202449 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.332166 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-swift-storage-0\") pod \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.332211 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-svc\") pod \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.332239 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw98q\" (UniqueName: \"kubernetes.io/projected/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-kube-api-access-zw98q\") pod \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.332261 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-sb\") pod \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.332335 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-config\") pod \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.332398 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-nb\") pod \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\" (UID: \"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f\") " Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.349840 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-kube-api-access-zw98q" (OuterVolumeSpecName: "kube-api-access-zw98q") pod "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" (UID: "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f"). InnerVolumeSpecName "kube-api-access-zw98q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.384711 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" (UID: "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.388952 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" (UID: "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.390153 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" (UID: "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.391264 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-config" (OuterVolumeSpecName: "config") pod "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" (UID: "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.395690 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" (UID: "afbd0552-ad38-45fa-ac2a-0bb789c3fa3f"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.435361 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.435398 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.435411 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.435422 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw98q\" (UniqueName: \"kubernetes.io/projected/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-kube-api-access-zw98q\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.435433 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.435443 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.640344 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d3f83f3-8d54-4c16-a768-614f531231ef","Type":"ContainerStarted","Data":"5c423ac07b468852809bfe25fa5362940c4a0b1bdb37da939a56e932c83aa56f"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.645637 4884 generic.go:334] "Generic (PLEG): container finished" podID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerID="6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4" exitCode=0 Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.645708 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" event={"ID":"1a186df2-95e5-4f8c-9184-0eeef5af978c","Type":"ContainerDied","Data":"6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.647438 4884 generic.go:334] "Generic (PLEG): container finished" podID="4b9d1dde-9e0b-47bb-8046-a5687c344d9b" containerID="72697e6b2a753930910f59bee8df04dae210b43500f84698945298c337f4adf6" exitCode=0 Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.647481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-39ed-account-create-szh2k" event={"ID":"4b9d1dde-9e0b-47bb-8046-a5687c344d9b","Type":"ContainerDied","Data":"72697e6b2a753930910f59bee8df04dae210b43500f84698945298c337f4adf6"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.647498 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-39ed-account-create-szh2k" event={"ID":"4b9d1dde-9e0b-47bb-8046-a5687c344d9b","Type":"ContainerStarted","Data":"e7acbb189d63dffe4f4272cb53b8cf1345c64c0b749016d6ce1965ffa197dd1e"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.665173 4884 generic.go:334] "Generic (PLEG): container 
finished" podID="8a17a6e8-da33-4343-bc08-feb493224228" containerID="b672e9d20e9ae21776b687169a5546d95f3c175cd1a50b1e07fe0423b506e7fa" exitCode=0 Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.665236 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0078-account-create-z2ttf" event={"ID":"8a17a6e8-da33-4343-bc08-feb493224228","Type":"ContainerDied","Data":"b672e9d20e9ae21776b687169a5546d95f3c175cd1a50b1e07fe0423b506e7fa"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.665260 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0078-account-create-z2ttf" event={"ID":"8a17a6e8-da33-4343-bc08-feb493224228","Type":"ContainerStarted","Data":"2e99aa2e38e2e013ab1dd9110fa3c1fadd8d66c343f48406ccf1772fbaa3d8cf"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.667052 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a72f0d8-d60b-4f12-97ed-9ddaca128ff8" containerID="fdbae1a079e558ce0feb21ed3c6269335bffc9b787b3cc81facbc980945f77ff" exitCode=0 Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.667160 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-783f-account-create-6222f" event={"ID":"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8","Type":"ContainerDied","Data":"fdbae1a079e558ce0feb21ed3c6269335bffc9b787b3cc81facbc980945f77ff"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.667224 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-783f-account-create-6222f" event={"ID":"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8","Type":"ContainerStarted","Data":"76d7546926ad985ede4c5f430599a01ecfbae4ec395388b75416ac6500fad4cc"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.675394 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rslcd" event={"ID":"bd79eeaf-5466-462c-a066-4386050eab44","Type":"ContainerStarted","Data":"2d1411fe8264af95e0eb8318d16f2b0fdd715b64f44342f8230637454ca0ab1c"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.687649 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f82d7a01-e49c-47ae-862b-e4169002dd7c","Type":"ContainerStarted","Data":"cdd46bb27e6ec218e620f1dbce589d5e6075a7c8eef50ce5a2fff1126f1f554e"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.695836 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" event={"ID":"afbd0552-ad38-45fa-ac2a-0bb789c3fa3f","Type":"ContainerDied","Data":"763c0323bbcb538c5e573d393fcc1d48253847703ceb23b4eb91272b185ba40b"} Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.695879 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-n98cf" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.695891 4884 scope.go:117] "RemoveContainer" containerID="b20f8882c1ff63185d65b3b6fef7391be624679ba4dd6a3888e818a40242fd15" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.770281 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-rslcd" podStartSLOduration=5.77025547 podStartE2EDuration="5.77025547s" podCreationTimestamp="2025-11-28 15:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:11.736554788 +0000 UTC m=+1311.299338589" watchObservedRunningTime="2025-11-28 15:41:11.77025547 +0000 UTC m=+1311.333039271" Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.810749 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-n98cf"] Nov 28 15:41:11 crc kubenswrapper[4884]: I1128 15:41:11.818787 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-n98cf"] Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.698942 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" path="/var/lib/kubelet/pods/afbd0552-ad38-45fa-ac2a-0bb789c3fa3f/volumes" Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.707581 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d3f83f3-8d54-4c16-a768-614f531231ef","Type":"ContainerStarted","Data":"1a30a3d4bf359f5fc49d3667630169dd4e6bd464b48ce6bba39f4432dcb114e2"} Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.707718 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-log" containerID="cri-o://5c423ac07b468852809bfe25fa5362940c4a0b1bdb37da939a56e932c83aa56f" gracePeriod=30 Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.707821 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-httpd" containerID="cri-o://1a30a3d4bf359f5fc49d3667630169dd4e6bd464b48ce6bba39f4432dcb114e2" gracePeriod=30 Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.715140 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" event={"ID":"1a186df2-95e5-4f8c-9184-0eeef5af978c","Type":"ContainerStarted","Data":"dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c"} Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.715867 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.720186 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-log" containerID="cri-o://cdd46bb27e6ec218e620f1dbce589d5e6075a7c8eef50ce5a2fff1126f1f554e" gracePeriod=30 Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.720230 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-httpd" 
containerID="cri-o://b31224d213d702a255721531cd9fda767940f9805e2530e88f7d135ed3787c8b" gracePeriod=30 Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.720181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f82d7a01-e49c-47ae-862b-e4169002dd7c","Type":"ContainerStarted","Data":"b31224d213d702a255721531cd9fda767940f9805e2530e88f7d135ed3787c8b"} Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.736368 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.736350402 podStartE2EDuration="6.736350402s" podCreationTimestamp="2025-11-28 15:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:12.730304514 +0000 UTC m=+1312.293088315" watchObservedRunningTime="2025-11-28 15:41:12.736350402 +0000 UTC m=+1312.299134193" Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.756565 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" podStartSLOduration=6.756546774 podStartE2EDuration="6.756546774s" podCreationTimestamp="2025-11-28 15:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:12.750939017 +0000 UTC m=+1312.313722828" watchObservedRunningTime="2025-11-28 15:41:12.756546774 +0000 UTC m=+1312.319330575" Nov 28 15:41:12 crc kubenswrapper[4884]: I1128 15:41:12.780681 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.780662041 podStartE2EDuration="6.780662041s" podCreationTimestamp="2025-11-28 15:41:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:12.77975454 +0000 UTC m=+1312.342538351" watchObservedRunningTime="2025-11-28 15:41:12.780662041 +0000 UTC m=+1312.343445842" Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.733720 4884 generic.go:334] "Generic (PLEG): container finished" podID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerID="b31224d213d702a255721531cd9fda767940f9805e2530e88f7d135ed3787c8b" exitCode=0 Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.734140 4884 generic.go:334] "Generic (PLEG): container finished" podID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerID="cdd46bb27e6ec218e620f1dbce589d5e6075a7c8eef50ce5a2fff1126f1f554e" exitCode=143 Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.734182 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f82d7a01-e49c-47ae-862b-e4169002dd7c","Type":"ContainerDied","Data":"b31224d213d702a255721531cd9fda767940f9805e2530e88f7d135ed3787c8b"} Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.734212 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f82d7a01-e49c-47ae-862b-e4169002dd7c","Type":"ContainerDied","Data":"cdd46bb27e6ec218e620f1dbce589d5e6075a7c8eef50ce5a2fff1126f1f554e"} Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.737919 4884 generic.go:334] "Generic (PLEG): container finished" podID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerID="1a30a3d4bf359f5fc49d3667630169dd4e6bd464b48ce6bba39f4432dcb114e2" exitCode=0 Nov 28 15:41:13 crc 
kubenswrapper[4884]: I1128 15:41:13.737941 4884 generic.go:334] "Generic (PLEG): container finished" podID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerID="5c423ac07b468852809bfe25fa5362940c4a0b1bdb37da939a56e932c83aa56f" exitCode=143 Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.738007 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d3f83f3-8d54-4c16-a768-614f531231ef","Type":"ContainerDied","Data":"1a30a3d4bf359f5fc49d3667630169dd4e6bd464b48ce6bba39f4432dcb114e2"} Nov 28 15:41:13 crc kubenswrapper[4884]: I1128 15:41:13.738068 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d3f83f3-8d54-4c16-a768-614f531231ef","Type":"ContainerDied","Data":"5c423ac07b468852809bfe25fa5362940c4a0b1bdb37da939a56e932c83aa56f"} Nov 28 15:41:14 crc kubenswrapper[4884]: I1128 15:41:14.752350 4884 generic.go:334] "Generic (PLEG): container finished" podID="bd79eeaf-5466-462c-a066-4386050eab44" containerID="2d1411fe8264af95e0eb8318d16f2b0fdd715b64f44342f8230637454ca0ab1c" exitCode=0 Nov 28 15:41:14 crc kubenswrapper[4884]: I1128 15:41:14.752452 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rslcd" event={"ID":"bd79eeaf-5466-462c-a066-4386050eab44","Type":"ContainerDied","Data":"2d1411fe8264af95e0eb8318d16f2b0fdd715b64f44342f8230637454ca0ab1c"} Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.153756 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.167541 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.170729 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.171083 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.175421 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326564 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-logs\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326628 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-httpd-run\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326654 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-config-data\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwfn4\" (UniqueName: \"kubernetes.io/projected/5d3f83f3-8d54-4c16-a768-614f531231ef-kube-api-access-zwfn4\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326705 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-scripts\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326742 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmbcz\" (UniqueName: \"kubernetes.io/projected/f82d7a01-e49c-47ae-862b-e4169002dd7c-kube-api-access-jmbcz\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326777 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326809 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-config-data\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326849 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326864 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdx8l\" (UniqueName: \"kubernetes.io/projected/8a17a6e8-da33-4343-bc08-feb493224228-kube-api-access-cdx8l\") pod \"8a17a6e8-da33-4343-bc08-feb493224228\" (UID: \"8a17a6e8-da33-4343-bc08-feb493224228\") " Nov 28 15:41:15 crc kubenswrapper[4884]: 
I1128 15:41:15.326884 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s4ml\" (UniqueName: \"kubernetes.io/projected/4b9d1dde-9e0b-47bb-8046-a5687c344d9b-kube-api-access-5s4ml\") pod \"4b9d1dde-9e0b-47bb-8046-a5687c344d9b\" (UID: \"4b9d1dde-9e0b-47bb-8046-a5687c344d9b\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326903 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-logs\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326931 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-scripts\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326947 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-combined-ca-bundle\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.326981 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-combined-ca-bundle\") pod \"5d3f83f3-8d54-4c16-a768-614f531231ef\" (UID: \"5d3f83f3-8d54-4c16-a768-614f531231ef\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.327012 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-httpd-run\") pod \"f82d7a01-e49c-47ae-862b-e4169002dd7c\" (UID: \"f82d7a01-e49c-47ae-862b-e4169002dd7c\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.327034 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t582t\" (UniqueName: \"kubernetes.io/projected/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8-kube-api-access-t582t\") pod \"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8\" (UID: \"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8\") " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.328030 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-logs" (OuterVolumeSpecName: "logs") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.328090 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.328495 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.328748 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-logs" (OuterVolumeSpecName: "logs") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.335055 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d3f83f3-8d54-4c16-a768-614f531231ef-kube-api-access-zwfn4" (OuterVolumeSpecName: "kube-api-access-zwfn4") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "kube-api-access-zwfn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.335061 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-scripts" (OuterVolumeSpecName: "scripts") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.336585 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b9d1dde-9e0b-47bb-8046-a5687c344d9b-kube-api-access-5s4ml" (OuterVolumeSpecName: "kube-api-access-5s4ml") pod "4b9d1dde-9e0b-47bb-8046-a5687c344d9b" (UID: "4b9d1dde-9e0b-47bb-8046-a5687c344d9b"). InnerVolumeSpecName "kube-api-access-5s4ml". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.337287 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-scripts" (OuterVolumeSpecName: "scripts") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.337304 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.338353 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a17a6e8-da33-4343-bc08-feb493224228-kube-api-access-cdx8l" (OuterVolumeSpecName: "kube-api-access-cdx8l") pod "8a17a6e8-da33-4343-bc08-feb493224228" (UID: "8a17a6e8-da33-4343-bc08-feb493224228"). InnerVolumeSpecName "kube-api-access-cdx8l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.339244 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.340493 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8-kube-api-access-t582t" (OuterVolumeSpecName: "kube-api-access-t582t") pod "4a72f0d8-d60b-4f12-97ed-9ddaca128ff8" (UID: "4a72f0d8-d60b-4f12-97ed-9ddaca128ff8"). InnerVolumeSpecName "kube-api-access-t582t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.369845 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f82d7a01-e49c-47ae-862b-e4169002dd7c-kube-api-access-jmbcz" (OuterVolumeSpecName: "kube-api-access-jmbcz") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "kube-api-access-jmbcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.380171 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.382292 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-config-data" (OuterVolumeSpecName: "config-data") pod "5d3f83f3-8d54-4c16-a768-614f531231ef" (UID: "5d3f83f3-8d54-4c16-a768-614f531231ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.382950 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.422072 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-config-data" (OuterVolumeSpecName: "config-data") pod "f82d7a01-e49c-47ae-862b-e4169002dd7c" (UID: "f82d7a01-e49c-47ae-862b-e4169002dd7c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429508 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429541 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429552 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429565 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429574 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f82d7a01-e49c-47ae-862b-e4169002dd7c-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429584 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t582t\" (UniqueName: \"kubernetes.io/projected/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8-kube-api-access-t582t\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429592 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429601 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d3f83f3-8d54-4c16-a768-614f531231ef-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429609 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f82d7a01-e49c-47ae-862b-e4169002dd7c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429616 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwfn4\" (UniqueName: \"kubernetes.io/projected/5d3f83f3-8d54-4c16-a768-614f531231ef-kube-api-access-zwfn4\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429624 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429631 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmbcz\" (UniqueName: \"kubernetes.io/projected/f82d7a01-e49c-47ae-862b-e4169002dd7c-kube-api-access-jmbcz\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429660 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429669 4884 
reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d3f83f3-8d54-4c16-a768-614f531231ef-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429682 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429691 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdx8l\" (UniqueName: \"kubernetes.io/projected/8a17a6e8-da33-4343-bc08-feb493224228-kube-api-access-cdx8l\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.429701 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s4ml\" (UniqueName: \"kubernetes.io/projected/4b9d1dde-9e0b-47bb-8046-a5687c344d9b-kube-api-access-5s4ml\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.448224 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.455107 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.534925 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.534965 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.767996 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f82d7a01-e49c-47ae-862b-e4169002dd7c","Type":"ContainerDied","Data":"153c1951e5b367e5652202322670e3853f25bdb7a6102a7bdefe4553a33bdba8"} Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.768066 4884 scope.go:117] "RemoveContainer" containerID="b31224d213d702a255721531cd9fda767940f9805e2530e88f7d135ed3787c8b" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.768257 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.781396 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-39ed-account-create-szh2k" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.781401 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-39ed-account-create-szh2k" event={"ID":"4b9d1dde-9e0b-47bb-8046-a5687c344d9b","Type":"ContainerDied","Data":"e7acbb189d63dffe4f4272cb53b8cf1345c64c0b749016d6ce1965ffa197dd1e"} Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.781458 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7acbb189d63dffe4f4272cb53b8cf1345c64c0b749016d6ce1965ffa197dd1e" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.796744 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0078-account-create-z2ttf" event={"ID":"8a17a6e8-da33-4343-bc08-feb493224228","Type":"ContainerDied","Data":"2e99aa2e38e2e013ab1dd9110fa3c1fadd8d66c343f48406ccf1772fbaa3d8cf"} Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.796788 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e99aa2e38e2e013ab1dd9110fa3c1fadd8d66c343f48406ccf1772fbaa3d8cf" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.796862 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0078-account-create-z2ttf" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.809996 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5d3f83f3-8d54-4c16-a768-614f531231ef","Type":"ContainerDied","Data":"ab180b74066ea56b59971fbb2c09251ace27208b4e70aa13ffa30bd8526b7435"} Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.810126 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.819687 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-783f-account-create-6222f" event={"ID":"4a72f0d8-d60b-4f12-97ed-9ddaca128ff8","Type":"ContainerDied","Data":"76d7546926ad985ede4c5f430599a01ecfbae4ec395388b75416ac6500fad4cc"} Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.819720 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-783f-account-create-6222f" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.819739 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76d7546926ad985ede4c5f430599a01ecfbae4ec395388b75416ac6500fad4cc" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.830741 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.841132 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869025 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869528 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" containerName="init" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869552 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" containerName="init" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869567 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-httpd" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869575 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-httpd" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869590 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" containerName="init" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869600 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" containerName="init" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869612 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a72f0d8-d60b-4f12-97ed-9ddaca128ff8" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869620 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a72f0d8-d60b-4f12-97ed-9ddaca128ff8" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869632 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a17a6e8-da33-4343-bc08-feb493224228" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869640 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a17a6e8-da33-4343-bc08-feb493224228" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869659 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b9d1dde-9e0b-47bb-8046-a5687c344d9b" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869667 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b9d1dde-9e0b-47bb-8046-a5687c344d9b" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869677 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-log" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869684 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-log" Nov 28 
15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869700 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-httpd" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869708 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-httpd" Nov 28 15:41:15 crc kubenswrapper[4884]: E1128 15:41:15.869745 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-log" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869753 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-log" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869976 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a72f0d8-d60b-4f12-97ed-9ddaca128ff8" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.869992 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-httpd" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870004 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-log" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870043 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="afbd0552-ad38-45fa-ac2a-0bb789c3fa3f" containerName="init" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870057 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" containerName="glance-httpd" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870107 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a17a6e8-da33-4343-bc08-feb493224228" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870120 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" containerName="glance-log" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870141 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b9d1dde-9e0b-47bb-8046-a5687c344d9b" containerName="mariadb-account-create" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.870152 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="333f56a4-9727-4a9d-a8a3-cbb9ffca80b3" containerName="init" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.871928 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.873874 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.874130 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.874171 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmtm9" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.889204 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.930904 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.943043 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.953617 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.955138 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.957766 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:41:15 crc kubenswrapper[4884]: I1128 15:41:15.971728 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.048926 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.048986 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8zt8\" (UniqueName: \"kubernetes.io/projected/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-kube-api-access-s8zt8\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.049022 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.049093 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-logs\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.049133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.049151 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.049174 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151055 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151120 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151172 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-config-data\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151209 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-logs\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151238 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151255 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151270 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-logs\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151343 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151358 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151380 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7p64\" (UniqueName: \"kubernetes.io/projected/58efeb8a-6c15-405b-b5df-28c57c020229-kube-api-access-c7p64\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8zt8\" (UniqueName: \"kubernetes.io/projected/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-kube-api-access-s8zt8\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151417 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-scripts\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.151826 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.152799 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" 
(UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.152924 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-logs\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.157761 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.160677 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.169173 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.172270 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8zt8\" (UniqueName: \"kubernetes.io/projected/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-kube-api-access-s8zt8\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.183881 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.203408 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253294 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7p64\" (UniqueName: \"kubernetes.io/projected/58efeb8a-6c15-405b-b5df-28c57c020229-kube-api-access-c7p64\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-scripts\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253347 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253396 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-config-data\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253447 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253463 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-logs\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.253932 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-logs\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.254159 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc 
kubenswrapper[4884]: I1128 15:41:16.254236 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.266012 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-scripts\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.273856 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.274715 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-config-data\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.316088 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7p64\" (UniqueName: \"kubernetes.io/projected/58efeb8a-6c15-405b-b5df-28c57c020229-kube-api-access-c7p64\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.350894 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.579893 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.584759 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.668979 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.701703 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d3f83f3-8d54-4c16-a768-614f531231ef" path="/var/lib/kubelet/pods/5d3f83f3-8d54-4c16-a768-614f531231ef/volumes" Nov 28 15:41:16 crc kubenswrapper[4884]: I1128 15:41:16.703585 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f82d7a01-e49c-47ae-862b-e4169002dd7c" path="/var/lib/kubelet/pods/f82d7a01-e49c-47ae-862b-e4169002dd7c/volumes" Nov 28 15:41:17 crc kubenswrapper[4884]: I1128 15:41:17.384278 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:41:17 crc kubenswrapper[4884]: I1128 15:41:17.454316 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kvqlh"] Nov 28 15:41:17 crc kubenswrapper[4884]: I1128 15:41:17.454684 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerName="dnsmasq-dns" containerID="cri-o://6055a9d2a5ed4e74724da67b6844fa4ecb94ae7d26d6c9b96565e587ae4299c3" gracePeriod=10 Nov 28 15:41:17 crc kubenswrapper[4884]: I1128 15:41:17.840254 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerID="6055a9d2a5ed4e74724da67b6844fa4ecb94ae7d26d6c9b96565e587ae4299c3" exitCode=0 Nov 28 15:41:17 crc kubenswrapper[4884]: I1128 15:41:17.840325 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" event={"ID":"4c4f3feb-a977-4c65-8c72-b638fa15027a","Type":"ContainerDied","Data":"6055a9d2a5ed4e74724da67b6844fa4ecb94ae7d26d6c9b96565e587ae4299c3"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.001396 4884 scope.go:117] "RemoveContainer" containerID="cdd46bb27e6ec218e620f1dbce589d5e6075a7c8eef50ce5a2fff1126f1f554e" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.085390 4884 scope.go:117] "RemoveContainer" containerID="1a30a3d4bf359f5fc49d3667630169dd4e6bd464b48ce6bba39f4432dcb114e2" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.232261 4884 scope.go:117] "RemoveContainer" containerID="5c423ac07b468852809bfe25fa5362940c4a0b1bdb37da939a56e932c83aa56f" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.257865 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.302231 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.388992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-swift-storage-0\") pod \"4c4f3feb-a977-4c65-8c72-b638fa15027a\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389476 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-config-data\") pod \"bd79eeaf-5466-462c-a066-4386050eab44\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389518 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-combined-ca-bundle\") pod \"bd79eeaf-5466-462c-a066-4386050eab44\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389555 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-config\") pod \"4c4f3feb-a977-4c65-8c72-b638fa15027a\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389599 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58wjp\" (UniqueName: \"kubernetes.io/projected/4c4f3feb-a977-4c65-8c72-b638fa15027a-kube-api-access-58wjp\") pod \"4c4f3feb-a977-4c65-8c72-b638fa15027a\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389636 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-nb\") pod \"4c4f3feb-a977-4c65-8c72-b638fa15027a\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389705 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-sb\") pod \"4c4f3feb-a977-4c65-8c72-b638fa15027a\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389762 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-fernet-keys\") pod \"bd79eeaf-5466-462c-a066-4386050eab44\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389798 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-credential-keys\") pod \"bd79eeaf-5466-462c-a066-4386050eab44\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389890 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jmwb\" (UniqueName: \"kubernetes.io/projected/bd79eeaf-5466-462c-a066-4386050eab44-kube-api-access-6jmwb\") pod 
\"bd79eeaf-5466-462c-a066-4386050eab44\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.389945 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-scripts\") pod \"bd79eeaf-5466-462c-a066-4386050eab44\" (UID: \"bd79eeaf-5466-462c-a066-4386050eab44\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.390004 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-svc\") pod \"4c4f3feb-a977-4c65-8c72-b638fa15027a\" (UID: \"4c4f3feb-a977-4c65-8c72-b638fa15027a\") " Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.398220 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "bd79eeaf-5466-462c-a066-4386050eab44" (UID: "bd79eeaf-5466-462c-a066-4386050eab44"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.405206 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-scripts" (OuterVolumeSpecName: "scripts") pod "bd79eeaf-5466-462c-a066-4386050eab44" (UID: "bd79eeaf-5466-462c-a066-4386050eab44"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.406267 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c4f3feb-a977-4c65-8c72-b638fa15027a-kube-api-access-58wjp" (OuterVolumeSpecName: "kube-api-access-58wjp") pod "4c4f3feb-a977-4c65-8c72-b638fa15027a" (UID: "4c4f3feb-a977-4c65-8c72-b638fa15027a"). InnerVolumeSpecName "kube-api-access-58wjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.408100 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "bd79eeaf-5466-462c-a066-4386050eab44" (UID: "bd79eeaf-5466-462c-a066-4386050eab44"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.418882 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd79eeaf-5466-462c-a066-4386050eab44-kube-api-access-6jmwb" (OuterVolumeSpecName: "kube-api-access-6jmwb") pod "bd79eeaf-5466-462c-a066-4386050eab44" (UID: "bd79eeaf-5466-462c-a066-4386050eab44"). InnerVolumeSpecName "kube-api-access-6jmwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.452170 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-config-data" (OuterVolumeSpecName: "config-data") pod "bd79eeaf-5466-462c-a066-4386050eab44" (UID: "bd79eeaf-5466-462c-a066-4386050eab44"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.469049 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.473237 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd79eeaf-5466-462c-a066-4386050eab44" (UID: "bd79eeaf-5466-462c-a066-4386050eab44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.481526 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4c4f3feb-a977-4c65-8c72-b638fa15027a" (UID: "4c4f3feb-a977-4c65-8c72-b638fa15027a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.483726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4c4f3feb-a977-4c65-8c72-b638fa15027a" (UID: "4c4f3feb-a977-4c65-8c72-b638fa15027a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.483885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-config" (OuterVolumeSpecName: "config") pod "4c4f3feb-a977-4c65-8c72-b638fa15027a" (UID: "4c4f3feb-a977-4c65-8c72-b638fa15027a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.491348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4c4f3feb-a977-4c65-8c72-b638fa15027a" (UID: "4c4f3feb-a977-4c65-8c72-b638fa15027a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.492868 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.492930 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.492945 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.492959 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.492971 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.493011 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.493024 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58wjp\" (UniqueName: \"kubernetes.io/projected/4c4f3feb-a977-4c65-8c72-b638fa15027a-kube-api-access-58wjp\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.493036 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.493051 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.493082 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bd79eeaf-5466-462c-a066-4386050eab44-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.493112 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jmwb\" (UniqueName: \"kubernetes.io/projected/bd79eeaf-5466-462c-a066-4386050eab44-kube-api-access-6jmwb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.494608 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4c4f3feb-a977-4c65-8c72-b638fa15027a" (UID: "4c4f3feb-a977-4c65-8c72-b638fa15027a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.594467 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c4f3feb-a977-4c65-8c72-b638fa15027a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.780290 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:18 crc kubenswrapper[4884]: W1128 15:41:18.784499 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9eebed7a_4ad0_4cbf_818c_d69d419d2c86.slice/crio-90fd470302e9b2d789f554b413663696e18e9ff8bbc45535afae40302509eb83 WatchSource:0}: Error finding container 90fd470302e9b2d789f554b413663696e18e9ff8bbc45535afae40302509eb83: Status 404 returned error can't find the container with id 90fd470302e9b2d789f554b413663696e18e9ff8bbc45535afae40302509eb83 Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.852843 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerStarted","Data":"367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.857504 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9eebed7a-4ad0-4cbf-818c-d69d419d2c86","Type":"ContainerStarted","Data":"90fd470302e9b2d789f554b413663696e18e9ff8bbc45535afae40302509eb83"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.859279 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"58efeb8a-6c15-405b-b5df-28c57c020229","Type":"ContainerStarted","Data":"c1a634d16ef455d8bf0173bac1857436b35f910ee698ef655588fdc7428a5f4f"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.860773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" event={"ID":"4c4f3feb-a977-4c65-8c72-b638fa15027a","Type":"ContainerDied","Data":"c3212654d82c894f719afda98bc6acb51e1f82a376691311cb4bfc935f36eb7b"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.860814 4884 scope.go:117] "RemoveContainer" containerID="6055a9d2a5ed4e74724da67b6844fa4ecb94ae7d26d6c9b96565e587ae4299c3" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.860838 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-kvqlh" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.863609 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rslcd" event={"ID":"bd79eeaf-5466-462c-a066-4386050eab44","Type":"ContainerDied","Data":"32b153b1fc85d3fe8910441f2c12c370af56edd711438703f1f5b161624d6e61"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.863634 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32b153b1fc85d3fe8910441f2c12c370af56edd711438703f1f5b161624d6e61" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.863707 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-rslcd" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.869974 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-sfxfp" event={"ID":"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c","Type":"ContainerStarted","Data":"c35862251dd7fa84424b1efdd78c3b8680af8eed8b5adf29edc19bd3bd2ec964"} Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.890292 4884 scope.go:117] "RemoveContainer" containerID="e848dab97da0f5a302adae56e051c89f8c322c97e30a1719d8d3a88bf59eabd4" Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.908002 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kvqlh"] Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.934410 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-kvqlh"] Nov 28 15:41:18 crc kubenswrapper[4884]: I1128 15:41:18.946035 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-sfxfp" podStartSLOduration=4.181889547 podStartE2EDuration="11.94601727s" podCreationTimestamp="2025-11-28 15:41:07 +0000 UTC" firstStartedPulling="2025-11-28 15:41:10.345143069 +0000 UTC m=+1309.907926870" lastFinishedPulling="2025-11-28 15:41:18.109270792 +0000 UTC m=+1317.672054593" observedRunningTime="2025-11-28 15:41:18.918451708 +0000 UTC m=+1318.481235509" watchObservedRunningTime="2025-11-28 15:41:18.94601727 +0000 UTC m=+1318.508801071" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.425532 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-rslcd"] Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.431588 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-rslcd"] Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.537668 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-n5pb8"] Nov 28 15:41:19 crc kubenswrapper[4884]: E1128 15:41:19.538569 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.538612 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerName="dnsmasq-dns" Nov 28 15:41:19 crc kubenswrapper[4884]: E1128 15:41:19.538629 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerName="init" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.538683 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerName="init" Nov 28 15:41:19 crc kubenswrapper[4884]: E1128 15:41:19.538724 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd79eeaf-5466-462c-a066-4386050eab44" containerName="keystone-bootstrap" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.538734 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd79eeaf-5466-462c-a066-4386050eab44" containerName="keystone-bootstrap" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.539053 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd79eeaf-5466-462c-a066-4386050eab44" containerName="keystone-bootstrap" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.539115 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" containerName="dnsmasq-dns" 
Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.540027 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.545080 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.545366 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.545556 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pjtcl" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.545659 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.551686 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-n5pb8"] Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.614816 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-fernet-keys\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.614888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvvh6\" (UniqueName: \"kubernetes.io/projected/6f6c7973-c2da-49c1-be51-6901ab4ba930-kube-api-access-wvvh6\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.614942 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-credential-keys\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.615052 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-combined-ca-bundle\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.615110 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-scripts\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.615137 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-config-data\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.716477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-combined-ca-bundle\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.716526 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-scripts\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.716544 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-config-data\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.716585 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-fernet-keys\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.716630 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvvh6\" (UniqueName: \"kubernetes.io/projected/6f6c7973-c2da-49c1-be51-6901ab4ba930-kube-api-access-wvvh6\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.716661 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-credential-keys\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.724176 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-scripts\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.724222 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-credential-keys\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.725066 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-fernet-keys\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.740552 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-config-data\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " 
pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.747586 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-combined-ca-bundle\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.766073 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvvh6\" (UniqueName: \"kubernetes.io/projected/6f6c7973-c2da-49c1-be51-6901ab4ba930-kube-api-access-wvvh6\") pod \"keystone-bootstrap-n5pb8\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.866558 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.881981 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9eebed7a-4ad0-4cbf-818c-d69d419d2c86","Type":"ContainerStarted","Data":"ce4a72ebff046a5bf51177d2c58b8ab34a666bbd00f08e944bdf366849fd0137"} Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.883938 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"58efeb8a-6c15-405b-b5df-28c57c020229","Type":"ContainerStarted","Data":"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef"} Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.883967 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"58efeb8a-6c15-405b-b5df-28c57c020229","Type":"ContainerStarted","Data":"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a"} Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.884126 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-log" containerID="cri-o://02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a" gracePeriod=30 Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.884635 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-httpd" containerID="cri-o://843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef" gracePeriod=30 Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.914340 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.914317845 podStartE2EDuration="4.914317845s" podCreationTimestamp="2025-11-28 15:41:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:19.901720748 +0000 UTC m=+1319.464504549" watchObservedRunningTime="2025-11-28 15:41:19.914317845 +0000 UTC m=+1319.477101656" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.973955 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-bvnxp"] Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.975287 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.979882 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.980670 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.981945 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-24xq8" Nov 28 15:41:19 crc kubenswrapper[4884]: I1128 15:41:19.985528 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-bvnxp"] Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.122937 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-config-data\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.123303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h4kt\" (UniqueName: \"kubernetes.io/projected/b9823f75-8df0-467c-af91-ad863667138b-kube-api-access-8h4kt\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.123375 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-db-sync-config-data\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.123458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-combined-ca-bundle\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.123633 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9823f75-8df0-467c-af91-ad863667138b-etc-machine-id\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.123731 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-scripts\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.128909 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-r4pfq"] Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.130080 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.133819 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.133890 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-hkn9p" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.138491 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-r4pfq"] Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225353 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-db-sync-config-data\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjgtj\" (UniqueName: \"kubernetes.io/projected/3088250d-24c6-4378-9ab6-67e4244567eb-kube-api-access-bjgtj\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225457 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-combined-ca-bundle\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225473 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-db-sync-config-data\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225500 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9823f75-8df0-467c-af91-ad863667138b-etc-machine-id\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225528 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-scripts\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225598 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-combined-ca-bundle\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225614 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9823f75-8df0-467c-af91-ad863667138b-etc-machine-id\") pod \"cinder-db-sync-bvnxp\" (UID: 
\"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225841 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-config-data\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.225908 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h4kt\" (UniqueName: \"kubernetes.io/projected/b9823f75-8df0-467c-af91-ad863667138b-kube-api-access-8h4kt\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.230449 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-scripts\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.231115 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-combined-ca-bundle\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.232468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-db-sync-config-data\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.238343 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-config-data\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.249808 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h4kt\" (UniqueName: \"kubernetes.io/projected/b9823f75-8df0-467c-af91-ad863667138b-kube-api-access-8h4kt\") pod \"cinder-db-sync-bvnxp\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.312646 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.315700 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-t66fw"] Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.320379 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.324169 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.324405 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-cjrtl" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.326835 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.328596 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-combined-ca-bundle\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.330848 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjgtj\" (UniqueName: \"kubernetes.io/projected/3088250d-24c6-4378-9ab6-67e4244567eb-kube-api-access-bjgtj\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.331288 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-db-sync-config-data\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.331722 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-combined-ca-bundle\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.337788 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-db-sync-config-data\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.353604 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-t66fw"] Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.360871 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjgtj\" (UniqueName: \"kubernetes.io/projected/3088250d-24c6-4378-9ab6-67e4244567eb-kube-api-access-bjgtj\") pod \"barbican-db-sync-r4pfq\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.433021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-combined-ca-bundle\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.433152 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-config\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.433191 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4fq2\" (UniqueName: \"kubernetes.io/projected/f5493d26-8edb-4f78-8e5b-bd65d9490900-kube-api-access-z4fq2\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.444571 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.535947 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-config\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.535998 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4fq2\" (UniqueName: \"kubernetes.io/projected/f5493d26-8edb-4f78-8e5b-bd65d9490900-kube-api-access-z4fq2\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.536087 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-combined-ca-bundle\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.540671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-config\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.543447 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-combined-ca-bundle\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.559769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4fq2\" (UniqueName: \"kubernetes.io/projected/f5493d26-8edb-4f78-8e5b-bd65d9490900-kube-api-access-z4fq2\") pod \"neutron-db-sync-t66fw\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.703389 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-cjrtl" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.713513 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.760811 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c4f3feb-a977-4c65-8c72-b638fa15027a" path="/var/lib/kubelet/pods/4c4f3feb-a977-4c65-8c72-b638fa15027a/volumes" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.761947 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd79eeaf-5466-462c-a066-4386050eab44" path="/var/lib/kubelet/pods/bd79eeaf-5466-462c-a066-4386050eab44/volumes" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.821076 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.863187 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-n5pb8"] Nov 28 15:41:20 crc kubenswrapper[4884]: W1128 15:41:20.868292 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f6c7973_c2da_49c1_be51_6901ab4ba930.slice/crio-35df7c21d92becb335f4a8485d4fdd2a33df2992ea052cf9ae10bdc1c38c897a WatchSource:0}: Error finding container 35df7c21d92becb335f4a8485d4fdd2a33df2992ea052cf9ae10bdc1c38c897a: Status 404 returned error can't find the container with id 35df7c21d92becb335f4a8485d4fdd2a33df2992ea052cf9ae10bdc1c38c897a Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.933937 4884 generic.go:334] "Generic (PLEG): container finished" podID="58efeb8a-6c15-405b-b5df-28c57c020229" containerID="843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef" exitCode=143 Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.933983 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"58efeb8a-6c15-405b-b5df-28c57c020229","Type":"ContainerDied","Data":"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef"} Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.934015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"58efeb8a-6c15-405b-b5df-28c57c020229","Type":"ContainerDied","Data":"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a"} Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.934033 4884 scope.go:117] "RemoveContainer" containerID="843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.934045 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.933990 4884 generic.go:334] "Generic (PLEG): container finished" podID="58efeb8a-6c15-405b-b5df-28c57c020229" containerID="02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a" exitCode=143 Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.934136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"58efeb8a-6c15-405b-b5df-28c57c020229","Type":"ContainerDied","Data":"c1a634d16ef455d8bf0173bac1857436b35f910ee698ef655588fdc7428a5f4f"} Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.936123 4884 generic.go:334] "Generic (PLEG): container finished" podID="bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" containerID="c35862251dd7fa84424b1efdd78c3b8680af8eed8b5adf29edc19bd3bd2ec964" exitCode=0 Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.936171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-sfxfp" event={"ID":"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c","Type":"ContainerDied","Data":"c35862251dd7fa84424b1efdd78c3b8680af8eed8b5adf29edc19bd3bd2ec964"} Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.937191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n5pb8" event={"ID":"6f6c7973-c2da-49c1-be51-6901ab4ba930","Type":"ContainerStarted","Data":"35df7c21d92becb335f4a8485d4fdd2a33df2992ea052cf9ae10bdc1c38c897a"} Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.938655 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerStarted","Data":"4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8"} Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.943688 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-config-data\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.944072 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-combined-ca-bundle\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.944276 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7p64\" (UniqueName: \"kubernetes.io/projected/58efeb8a-6c15-405b-b5df-28c57c020229-kube-api-access-c7p64\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.944308 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-httpd-run\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.944388 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 
15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.944438 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-scripts\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.944530 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-logs\") pod \"58efeb8a-6c15-405b-b5df-28c57c020229\" (UID: \"58efeb8a-6c15-405b-b5df-28c57c020229\") " Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.946285 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-logs" (OuterVolumeSpecName: "logs") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.947151 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.949851 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.950660 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-scripts" (OuterVolumeSpecName: "scripts") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.954083 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58efeb8a-6c15-405b-b5df-28c57c020229-kube-api-access-c7p64" (OuterVolumeSpecName: "kube-api-access-c7p64") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "kube-api-access-c7p64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.977519 4884 scope.go:117] "RemoveContainer" containerID="02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a" Nov 28 15:41:20 crc kubenswrapper[4884]: I1128 15:41:20.988810 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.006102 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-config-data" (OuterVolumeSpecName: "config-data") pod "58efeb8a-6c15-405b-b5df-28c57c020229" (UID: "58efeb8a-6c15-405b-b5df-28c57c020229"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.031460 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-bvnxp"] Nov 28 15:41:21 crc kubenswrapper[4884]: W1128 15:41:21.038414 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9823f75_8df0_467c_af91_ad863667138b.slice/crio-4b325eb008624336d56fde5da286985649e2d3a396ea8d4d212bca335f293d37 WatchSource:0}: Error finding container 4b325eb008624336d56fde5da286985649e2d3a396ea8d4d212bca335f293d37: Status 404 returned error can't find the container with id 4b325eb008624336d56fde5da286985649e2d3a396ea8d4d212bca335f293d37 Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047160 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047186 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047196 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047209 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7p64\" (UniqueName: \"kubernetes.io/projected/58efeb8a-6c15-405b-b5df-28c57c020229-kube-api-access-c7p64\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047240 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047251 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58efeb8a-6c15-405b-b5df-28c57c020229-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.047259 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58efeb8a-6c15-405b-b5df-28c57c020229-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.067296 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.098989 4884 scope.go:117] "RemoveContainer" containerID="843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef" Nov 28 15:41:21 crc kubenswrapper[4884]: E1128 15:41:21.099478 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef\": container with ID starting with 843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef not found: ID does not exist" containerID="843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.099516 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef"} err="failed to get container status \"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef\": rpc error: code = NotFound desc = could not find container \"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef\": container with ID starting with 843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef not found: ID does not exist" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.099543 4884 scope.go:117] "RemoveContainer" containerID="02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a" Nov 28 15:41:21 crc kubenswrapper[4884]: E1128 15:41:21.099851 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a\": container with ID starting with 02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a not found: ID does not exist" containerID="02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.099893 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a"} err="failed to get container status \"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a\": rpc error: code = NotFound desc = could not find container \"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a\": container with ID starting with 02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a not found: ID does not exist" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.099921 4884 scope.go:117] "RemoveContainer" containerID="843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.100209 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef"} err="failed to get container status \"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef\": rpc error: code = NotFound desc = could not find container \"843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef\": container with ID starting with 843129d1358960b40d20d22fd4d850e7db1c16286f2ddbe71799b90c2e86ccef not found: ID does not exist" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.100241 4884 scope.go:117] "RemoveContainer" containerID="02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.100452 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a"} err="failed to get container status \"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a\": rpc error: code = NotFound desc = could not find container 
\"02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a\": container with ID starting with 02a1583941b6070241e1991121617f5497bf919cc47f807d5b7ee0fd0f605d3a not found: ID does not exist" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.149116 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.155109 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-r4pfq"] Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.243183 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.243249 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:41:21 crc kubenswrapper[4884]: W1128 15:41:21.262878 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5493d26_8edb_4f78_8e5b_bd65d9490900.slice/crio-6ca1f19366fb3b49895c362c3f1d19b18ac375bf1a6e37003974a4f79a003b9e WatchSource:0}: Error finding container 6ca1f19366fb3b49895c362c3f1d19b18ac375bf1a6e37003974a4f79a003b9e: Status 404 returned error can't find the container with id 6ca1f19366fb3b49895c362c3f1d19b18ac375bf1a6e37003974a4f79a003b9e Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.265344 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-t66fw"] Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.276393 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.283916 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.312498 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:21 crc kubenswrapper[4884]: E1128 15:41:21.312850 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-httpd" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.312866 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-httpd" Nov 28 15:41:21 crc kubenswrapper[4884]: E1128 15:41:21.312890 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-log" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.312899 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-log" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.313071 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-httpd" Nov 28 15:41:21 crc kubenswrapper[4884]: 
I1128 15:41:21.313112 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" containerName="glance-log" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.313956 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.319184 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.319522 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.347303 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355341 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-config-data\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355397 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-logs\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-scripts\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355491 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355631 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355688 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355763 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6mrb\" (UniqueName: \"kubernetes.io/projected/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-kube-api-access-r6mrb\") pod 
\"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.355832 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.456869 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.456932 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.456974 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6mrb\" (UniqueName: \"kubernetes.io/projected/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-kube-api-access-r6mrb\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.457009 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.457055 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-config-data\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.457075 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-logs\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.457135 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-scripts\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.457174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.457633 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.459596 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-logs\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.459841 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.463652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.470837 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-scripts\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.472832 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.474841 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-config-data\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.477573 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6mrb\" (UniqueName: \"kubernetes.io/projected/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-kube-api-access-r6mrb\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.511168 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 
15:41:21.645332 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.972897 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t66fw" event={"ID":"f5493d26-8edb-4f78-8e5b-bd65d9490900","Type":"ContainerStarted","Data":"2ac986d672399f8208eab374707fb3a20c8d05e42870714340327e102a6f8c29"} Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.973259 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t66fw" event={"ID":"f5493d26-8edb-4f78-8e5b-bd65d9490900","Type":"ContainerStarted","Data":"6ca1f19366fb3b49895c362c3f1d19b18ac375bf1a6e37003974a4f79a003b9e"} Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.981371 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bvnxp" event={"ID":"b9823f75-8df0-467c-af91-ad863667138b","Type":"ContainerStarted","Data":"4b325eb008624336d56fde5da286985649e2d3a396ea8d4d212bca335f293d37"} Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.983862 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n5pb8" event={"ID":"6f6c7973-c2da-49c1-be51-6901ab4ba930","Type":"ContainerStarted","Data":"bc5b536cad821d8563f531b0ae72f4474004545925faa5ffbb706c06cc0347bc"} Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.989982 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9eebed7a-4ad0-4cbf-818c-d69d419d2c86","Type":"ContainerStarted","Data":"40654bb99dd21d17b47b78f68100d5d60ed1dc859d7d8f62e2ea8bc162e48ee1"} Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.990202 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-log" containerID="cri-o://ce4a72ebff046a5bf51177d2c58b8ab34a666bbd00f08e944bdf366849fd0137" gracePeriod=30 Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.990350 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-httpd" containerID="cri-o://40654bb99dd21d17b47b78f68100d5d60ed1dc859d7d8f62e2ea8bc162e48ee1" gracePeriod=30 Nov 28 15:41:21 crc kubenswrapper[4884]: I1128 15:41:21.997809 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r4pfq" event={"ID":"3088250d-24c6-4378-9ab6-67e4244567eb","Type":"ContainerStarted","Data":"d2e3eb797574687001ab4929e81e86597266e5a6a8251f286401aa1354532a47"} Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.003442 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-t66fw" podStartSLOduration=2.003424013 podStartE2EDuration="2.003424013s" podCreationTimestamp="2025-11-28 15:41:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:21.988804497 +0000 UTC m=+1321.551588308" watchObservedRunningTime="2025-11-28 15:41:22.003424013 +0000 UTC m=+1321.566207804" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.020554 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-n5pb8" podStartSLOduration=3.020534839 podStartE2EDuration="3.020534839s" 
podCreationTimestamp="2025-11-28 15:41:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:22.009694496 +0000 UTC m=+1321.572478297" watchObservedRunningTime="2025-11-28 15:41:22.020534839 +0000 UTC m=+1321.583318640" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.033874 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.033854665 podStartE2EDuration="7.033854665s" podCreationTimestamp="2025-11-28 15:41:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:22.031612 +0000 UTC m=+1321.594395801" watchObservedRunningTime="2025-11-28 15:41:22.033854665 +0000 UTC m=+1321.596638466" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.472234 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.668958 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.704357 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58efeb8a-6c15-405b-b5df-28c57c020229" path="/var/lib/kubelet/pods/58efeb8a-6c15-405b-b5df-28c57c020229/volumes" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.787893 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-combined-ca-bundle\") pod \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.787939 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-logs\") pod \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.788022 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcb26\" (UniqueName: \"kubernetes.io/projected/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-kube-api-access-vcb26\") pod \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.788078 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-scripts\") pod \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.788118 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-config-data\") pod \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\" (UID: \"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c\") " Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.788471 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-logs" (OuterVolumeSpecName: "logs") pod "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" (UID: 
"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.793951 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-kube-api-access-vcb26" (OuterVolumeSpecName: "kube-api-access-vcb26") pod "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" (UID: "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c"). InnerVolumeSpecName "kube-api-access-vcb26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.798485 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-scripts" (OuterVolumeSpecName: "scripts") pod "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" (UID: "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.831519 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-config-data" (OuterVolumeSpecName: "config-data") pod "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" (UID: "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.858186 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" (UID: "bd1c087f-48cf-4b19-b8a4-ca7677f72f3c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.890844 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcb26\" (UniqueName: \"kubernetes.io/projected/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-kube-api-access-vcb26\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.890876 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.890885 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.890894 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:22 crc kubenswrapper[4884]: I1128 15:41:22.890904 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.015293 4884 generic.go:334] "Generic (PLEG): container finished" podID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerID="40654bb99dd21d17b47b78f68100d5d60ed1dc859d7d8f62e2ea8bc162e48ee1" exitCode=0 Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.015330 4884 generic.go:334] "Generic (PLEG): container finished" podID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerID="ce4a72ebff046a5bf51177d2c58b8ab34a666bbd00f08e944bdf366849fd0137" exitCode=143 Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.015378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9eebed7a-4ad0-4cbf-818c-d69d419d2c86","Type":"ContainerDied","Data":"40654bb99dd21d17b47b78f68100d5d60ed1dc859d7d8f62e2ea8bc162e48ee1"} Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.015409 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9eebed7a-4ad0-4cbf-818c-d69d419d2c86","Type":"ContainerDied","Data":"ce4a72ebff046a5bf51177d2c58b8ab34a666bbd00f08e944bdf366849fd0137"} Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.017553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"46c5fe18-ffeb-47cf-aede-0d6a83b77d89","Type":"ContainerStarted","Data":"9b42b4900ae595a7d91289c6a6be4e53ab0e5313a1a188443876109297561868"} Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.024007 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-sfxfp" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.026170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-sfxfp" event={"ID":"bd1c087f-48cf-4b19-b8a4-ca7677f72f3c","Type":"ContainerDied","Data":"fa8ccdab4ea562c4d9019211fb206a64659436666f31948e501016054106d782"} Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.026216 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa8ccdab4ea562c4d9019211fb206a64659436666f31948e501016054106d782" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.109395 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.137992 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7789c97d46-jmbnq"] Nov 28 15:41:23 crc kubenswrapper[4884]: E1128 15:41:23.138435 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-httpd" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.138452 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-httpd" Nov 28 15:41:23 crc kubenswrapper[4884]: E1128 15:41:23.138482 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" containerName="placement-db-sync" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.138491 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" containerName="placement-db-sync" Nov 28 15:41:23 crc kubenswrapper[4884]: E1128 15:41:23.138502 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-log" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.138511 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-log" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.138736 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" containerName="placement-db-sync" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.138751 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-httpd" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.138763 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" containerName="glance-log" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.139795 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.147344 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.147609 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.147725 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.147873 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.148529 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gg9hf" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.165192 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7789c97d46-jmbnq"] Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199712 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-httpd-run\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199821 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-config-data\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199839 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8zt8\" (UniqueName: \"kubernetes.io/projected/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-kube-api-access-s8zt8\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199910 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-logs\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199932 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-combined-ca-bundle\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199976 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.199998 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-scripts\") pod \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\" (UID: \"9eebed7a-4ad0-4cbf-818c-d69d419d2c86\") " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200222 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-public-tls-certs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200255 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-config-data\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200296 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6237eb73-294e-4e4b-a619-e669061a1b5b-logs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200320 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-scripts\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200343 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8m62\" (UniqueName: \"kubernetes.io/projected/6237eb73-294e-4e4b-a619-e669061a1b5b-kube-api-access-f8m62\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200372 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-internal-tls-certs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.200395 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-combined-ca-bundle\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.202316 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.203602 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-logs" (OuterVolumeSpecName: "logs") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.228256 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-scripts" (OuterVolumeSpecName: "scripts") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.242081 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-kube-api-access-s8zt8" (OuterVolumeSpecName: "kube-api-access-s8zt8") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "kube-api-access-s8zt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.245459 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.271174 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302395 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-public-tls-certs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302455 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-config-data\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302502 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6237eb73-294e-4e4b-a619-e669061a1b5b-logs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302526 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-scripts\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302550 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8m62\" (UniqueName: 
\"kubernetes.io/projected/6237eb73-294e-4e4b-a619-e669061a1b5b-kube-api-access-f8m62\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302622 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-internal-tls-certs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302646 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-combined-ca-bundle\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302781 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302798 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302807 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302839 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8zt8\" (UniqueName: \"kubernetes.io/projected/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-kube-api-access-s8zt8\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302848 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.302858 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.308173 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6237eb73-294e-4e4b-a619-e669061a1b5b-logs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.310203 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-config-data" (OuterVolumeSpecName: "config-data") pod "9eebed7a-4ad0-4cbf-818c-d69d419d2c86" (UID: "9eebed7a-4ad0-4cbf-818c-d69d419d2c86"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.315604 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-config-data\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.317026 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-public-tls-certs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.318379 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-internal-tls-certs\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.322531 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-combined-ca-bundle\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.329184 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8m62\" (UniqueName: \"kubernetes.io/projected/6237eb73-294e-4e4b-a619-e669061a1b5b-kube-api-access-f8m62\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.338434 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-scripts\") pod \"placement-7789c97d46-jmbnq\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.345828 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.404951 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eebed7a-4ad0-4cbf-818c-d69d419d2c86-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.404985 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.464469 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:23 crc kubenswrapper[4884]: I1128 15:41:23.965821 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7789c97d46-jmbnq"] Nov 28 15:41:23 crc kubenswrapper[4884]: W1128 15:41:23.976213 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6237eb73_294e_4e4b_a619_e669061a1b5b.slice/crio-97d4a4efde61a9fefbf19dd65380e6b45dead8570ef82552146b687eb51f597f WatchSource:0}: Error finding container 97d4a4efde61a9fefbf19dd65380e6b45dead8570ef82552146b687eb51f597f: Status 404 returned error can't find the container with id 97d4a4efde61a9fefbf19dd65380e6b45dead8570ef82552146b687eb51f597f Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.033052 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7789c97d46-jmbnq" event={"ID":"6237eb73-294e-4e4b-a619-e669061a1b5b","Type":"ContainerStarted","Data":"97d4a4efde61a9fefbf19dd65380e6b45dead8570ef82552146b687eb51f597f"} Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.035726 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9eebed7a-4ad0-4cbf-818c-d69d419d2c86","Type":"ContainerDied","Data":"90fd470302e9b2d789f554b413663696e18e9ff8bbc45535afae40302509eb83"} Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.035759 4884 scope.go:117] "RemoveContainer" containerID="40654bb99dd21d17b47b78f68100d5d60ed1dc859d7d8f62e2ea8bc162e48ee1" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.035860 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.075619 4884 scope.go:117] "RemoveContainer" containerID="ce4a72ebff046a5bf51177d2c58b8ab34a666bbd00f08e944bdf366849fd0137" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.091392 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.106606 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.185369 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.187048 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.189261 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.189956 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.190777 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.321526 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.322393 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.323127 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvdmc\" (UniqueName: \"kubernetes.io/projected/9af26263-cd93-4081-bdcd-518ad0587028-kube-api-access-fvdmc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.323382 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.323434 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.323856 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-logs\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.323879 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.324026 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.425940 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvdmc\" (UniqueName: \"kubernetes.io/projected/9af26263-cd93-4081-bdcd-518ad0587028-kube-api-access-fvdmc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.425997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426014 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-logs\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426059 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426079 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426132 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426167 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426780 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.426957 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-logs\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.427224 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.431302 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.432209 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.432680 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.432806 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.444061 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvdmc\" (UniqueName: \"kubernetes.io/projected/9af26263-cd93-4081-bdcd-518ad0587028-kube-api-access-fvdmc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.451905 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.550306 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:24 crc kubenswrapper[4884]: I1128 15:41:24.714224 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9eebed7a-4ad0-4cbf-818c-d69d419d2c86" path="/var/lib/kubelet/pods/9eebed7a-4ad0-4cbf-818c-d69d419d2c86/volumes" Nov 28 15:41:25 crc kubenswrapper[4884]: I1128 15:41:25.050011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7789c97d46-jmbnq" event={"ID":"6237eb73-294e-4e4b-a619-e669061a1b5b","Type":"ContainerStarted","Data":"4d34e7ecd7272753eb9a76334dacd796a0250216a60d30bc46bdd65b7be43497"} Nov 28 15:41:25 crc kubenswrapper[4884]: I1128 15:41:25.054763 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"46c5fe18-ffeb-47cf-aede-0d6a83b77d89","Type":"ContainerStarted","Data":"647bdd1a93a1e22c498285d4cea6e39103c8031829ffee33e1eab7e3b045b02c"} Nov 28 15:41:25 crc kubenswrapper[4884]: I1128 15:41:25.054806 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"46c5fe18-ffeb-47cf-aede-0d6a83b77d89","Type":"ContainerStarted","Data":"3a40d4c9d40608c334700862e223be9927b9d8a8774cfac47e1dd1e2a297ca90"} Nov 28 15:41:25 crc kubenswrapper[4884]: I1128 15:41:25.088775 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.088753717 podStartE2EDuration="4.088753717s" podCreationTimestamp="2025-11-28 15:41:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:25.075299188 +0000 UTC m=+1324.638083009" watchObservedRunningTime="2025-11-28 15:41:25.088753717 +0000 UTC m=+1324.651537538" Nov 28 15:41:25 crc kubenswrapper[4884]: I1128 15:41:25.145462 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:41:26 crc kubenswrapper[4884]: I1128 15:41:26.065624 4884 generic.go:334] "Generic (PLEG): container finished" podID="6f6c7973-c2da-49c1-be51-6901ab4ba930" containerID="bc5b536cad821d8563f531b0ae72f4474004545925faa5ffbb706c06cc0347bc" exitCode=0 Nov 28 15:41:26 crc kubenswrapper[4884]: I1128 15:41:26.065704 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n5pb8" event={"ID":"6f6c7973-c2da-49c1-be51-6901ab4ba930","Type":"ContainerDied","Data":"bc5b536cad821d8563f531b0ae72f4474004545925faa5ffbb706c06cc0347bc"} Nov 28 15:41:26 crc kubenswrapper[4884]: W1128 15:41:26.543805 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9af26263_cd93_4081_bdcd_518ad0587028.slice/crio-48513c64d92de00b4cd4b607c74300116e6b1f63a4ebea01bef53dc6acbc36c1 WatchSource:0}: Error finding container 48513c64d92de00b4cd4b607c74300116e6b1f63a4ebea01bef53dc6acbc36c1: Status 404 returned error can't find the container with id 48513c64d92de00b4cd4b607c74300116e6b1f63a4ebea01bef53dc6acbc36c1 Nov 28 15:41:27 crc kubenswrapper[4884]: I1128 15:41:27.076373 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9af26263-cd93-4081-bdcd-518ad0587028","Type":"ContainerStarted","Data":"48513c64d92de00b4cd4b607c74300116e6b1f63a4ebea01bef53dc6acbc36c1"} Nov 28 15:41:31 crc kubenswrapper[4884]: I1128 15:41:31.646382 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:41:31 crc kubenswrapper[4884]: I1128 15:41:31.647024 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:41:31 crc kubenswrapper[4884]: I1128 15:41:31.698316 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:41:31 crc kubenswrapper[4884]: I1128 15:41:31.738110 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:41:32 crc kubenswrapper[4884]: I1128 15:41:32.123625 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:41:32 crc kubenswrapper[4884]: I1128 15:41:32.123673 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:41:33 crc kubenswrapper[4884]: I1128 15:41:33.975282 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.113214 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.152778 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.153320 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-n5pb8" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.153578 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-n5pb8" event={"ID":"6f6c7973-c2da-49c1-be51-6901ab4ba930","Type":"ContainerDied","Data":"35df7c21d92becb335f4a8485d4fdd2a33df2992ea052cf9ae10bdc1c38c897a"} Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.153608 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35df7c21d92becb335f4a8485d4fdd2a33df2992ea052cf9ae10bdc1c38c897a" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.181180 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.243649 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-config-data\") pod \"6f6c7973-c2da-49c1-be51-6901ab4ba930\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.243839 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-scripts\") pod \"6f6c7973-c2da-49c1-be51-6901ab4ba930\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.243896 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-combined-ca-bundle\") pod \"6f6c7973-c2da-49c1-be51-6901ab4ba930\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.244044 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-fernet-keys\") pod \"6f6c7973-c2da-49c1-be51-6901ab4ba930\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.244139 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-credential-keys\") pod \"6f6c7973-c2da-49c1-be51-6901ab4ba930\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.244189 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvvh6\" (UniqueName: \"kubernetes.io/projected/6f6c7973-c2da-49c1-be51-6901ab4ba930-kube-api-access-wvvh6\") pod \"6f6c7973-c2da-49c1-be51-6901ab4ba930\" (UID: \"6f6c7973-c2da-49c1-be51-6901ab4ba930\") " Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.254138 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6f6c7973-c2da-49c1-be51-6901ab4ba930" (UID: "6f6c7973-c2da-49c1-be51-6901ab4ba930"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.256670 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6f6c7973-c2da-49c1-be51-6901ab4ba930" (UID: "6f6c7973-c2da-49c1-be51-6901ab4ba930"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.291050 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f6c7973-c2da-49c1-be51-6901ab4ba930-kube-api-access-wvvh6" (OuterVolumeSpecName: "kube-api-access-wvvh6") pod "6f6c7973-c2da-49c1-be51-6901ab4ba930" (UID: "6f6c7973-c2da-49c1-be51-6901ab4ba930"). InnerVolumeSpecName "kube-api-access-wvvh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.291015 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-scripts" (OuterVolumeSpecName: "scripts") pod "6f6c7973-c2da-49c1-be51-6901ab4ba930" (UID: "6f6c7973-c2da-49c1-be51-6901ab4ba930"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.298611 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f6c7973-c2da-49c1-be51-6901ab4ba930" (UID: "6f6c7973-c2da-49c1-be51-6901ab4ba930"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.347605 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvvh6\" (UniqueName: \"kubernetes.io/projected/6f6c7973-c2da-49c1-be51-6901ab4ba930-kube-api-access-wvvh6\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.347644 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.347653 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.347661 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.347671 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.376058 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-config-data" (OuterVolumeSpecName: "config-data") pod "6f6c7973-c2da-49c1-be51-6901ab4ba930" (UID: "6f6c7973-c2da-49c1-be51-6901ab4ba930"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:34 crc kubenswrapper[4884]: I1128 15:41:34.449194 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6c7973-c2da-49c1-be51-6901ab4ba930-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.330119 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7d44bc67d-rzq4r"] Nov 28 15:41:35 crc kubenswrapper[4884]: E1128 15:41:35.330738 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f6c7973-c2da-49c1-be51-6901ab4ba930" containerName="keystone-bootstrap" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.330758 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f6c7973-c2da-49c1-be51-6901ab4ba930" containerName="keystone-bootstrap" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.331031 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f6c7973-c2da-49c1-be51-6901ab4ba930" containerName="keystone-bootstrap" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.332196 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.336637 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.336724 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-pjtcl" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.336874 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.338029 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.338395 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.339450 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.344851 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7d44bc67d-rzq4r"] Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467772 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-fernet-keys\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467821 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-combined-ca-bundle\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467844 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-scripts\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467921 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6m6l\" (UniqueName: \"kubernetes.io/projected/91405fb1-1a28-4fb4-9548-84c4b1797d45-kube-api-access-j6m6l\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467938 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-credential-keys\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467953 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-internal-tls-certs\") pod \"keystone-7d44bc67d-rzq4r\" (UID: 
\"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.467989 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.468116 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-config-data\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569170 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-internal-tls-certs\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569564 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-credential-keys\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569588 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569711 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-config-data\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569748 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-fernet-keys\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569776 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-combined-ca-bundle\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.569799 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-scripts\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc 
kubenswrapper[4884]: I1128 15:41:35.569867 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6m6l\" (UniqueName: \"kubernetes.io/projected/91405fb1-1a28-4fb4-9548-84c4b1797d45-kube-api-access-j6m6l\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.575224 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-config-data\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.575535 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-credential-keys\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.575562 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.577494 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-fernet-keys\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.578366 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-combined-ca-bundle\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.579926 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-scripts\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.580005 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-internal-tls-certs\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.589662 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6m6l\" (UniqueName: \"kubernetes.io/projected/91405fb1-1a28-4fb4-9548-84c4b1797d45-kube-api-access-j6m6l\") pod \"keystone-7d44bc67d-rzq4r\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:35 crc kubenswrapper[4884]: I1128 15:41:35.663753 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:44 crc kubenswrapper[4884]: E1128 15:41:44.181026 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 28 15:41:44 crc kubenswrapper[4884]: E1128 15:41:44.181866 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8h4kt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-bvnxp_openstack(b9823f75-8df0-467c-af91-ad863667138b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 15:41:44 crc kubenswrapper[4884]: E1128 15:41:44.188511 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-bvnxp" podUID="b9823f75-8df0-467c-af91-ad863667138b" Nov 28 15:41:44 crc kubenswrapper[4884]: E1128 15:41:44.248714 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-bvnxp" podUID="b9823f75-8df0-467c-af91-ad863667138b" Nov 28 15:41:44 crc kubenswrapper[4884]: I1128 15:41:44.658829 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7d44bc67d-rzq4r"] Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.251670 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerStarted","Data":"30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.253780 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7789c97d46-jmbnq" event={"ID":"6237eb73-294e-4e4b-a619-e669061a1b5b","Type":"ContainerStarted","Data":"70ad5e352be838cd9b823be6fa572eb0d45de5ed204f4a0a7f67e0afe112af20"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.254211 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.254275 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.256178 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9af26263-cd93-4081-bdcd-518ad0587028","Type":"ContainerStarted","Data":"12844c127d5f9413cee20953145554f58e25d37c157f4afdbc8234813085905c"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.256218 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9af26263-cd93-4081-bdcd-518ad0587028","Type":"ContainerStarted","Data":"010b40b6e1ffcab5263272b1d4bc325f6b108116be5718e9cd626f411cd03097"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.258171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d44bc67d-rzq4r" event={"ID":"91405fb1-1a28-4fb4-9548-84c4b1797d45","Type":"ContainerStarted","Data":"b935ab2ff92c4824683a68fa77cd7073eb24e434873a920819150e3598f5fb46"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.258220 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d44bc67d-rzq4r" event={"ID":"91405fb1-1a28-4fb4-9548-84c4b1797d45","Type":"ContainerStarted","Data":"0d6f39598d566bf6fe9b24417092ac23d73694449aa875215bc03406fe1ff65a"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.258286 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.260520 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r4pfq" event={"ID":"3088250d-24c6-4378-9ab6-67e4244567eb","Type":"ContainerStarted","Data":"edd37f72384b03eb5519315a8e0249d6ae9ad1a1807125baf938035d6cdfba74"} Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.275747 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7789c97d46-jmbnq" podStartSLOduration=22.275730973 podStartE2EDuration="22.275730973s" podCreationTimestamp="2025-11-28 15:41:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:45.267877541 +0000 
UTC m=+1344.830661352" watchObservedRunningTime="2025-11-28 15:41:45.275730973 +0000 UTC m=+1344.838514774" Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.286992 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-r4pfq" podStartSLOduration=2.312798736 podStartE2EDuration="25.286970867s" podCreationTimestamp="2025-11-28 15:41:20 +0000 UTC" firstStartedPulling="2025-11-28 15:41:21.168384677 +0000 UTC m=+1320.731168498" lastFinishedPulling="2025-11-28 15:41:44.142556818 +0000 UTC m=+1343.705340629" observedRunningTime="2025-11-28 15:41:45.283991964 +0000 UTC m=+1344.846775765" watchObservedRunningTime="2025-11-28 15:41:45.286970867 +0000 UTC m=+1344.849754668" Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.309176 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=21.309152787 podStartE2EDuration="21.309152787s" podCreationTimestamp="2025-11-28 15:41:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:45.302996017 +0000 UTC m=+1344.865779818" watchObservedRunningTime="2025-11-28 15:41:45.309152787 +0000 UTC m=+1344.871936598" Nov 28 15:41:45 crc kubenswrapper[4884]: I1128 15:41:45.327207 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7d44bc67d-rzq4r" podStartSLOduration=10.327185977 podStartE2EDuration="10.327185977s" podCreationTimestamp="2025-11-28 15:41:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:45.323336624 +0000 UTC m=+1344.886120425" watchObservedRunningTime="2025-11-28 15:41:45.327185977 +0000 UTC m=+1344.889969788" Nov 28 15:41:46 crc kubenswrapper[4884]: I1128 15:41:46.331793 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:47 crc kubenswrapper[4884]: I1128 15:41:47.283398 4884 generic.go:334] "Generic (PLEG): container finished" podID="f5493d26-8edb-4f78-8e5b-bd65d9490900" containerID="2ac986d672399f8208eab374707fb3a20c8d05e42870714340327e102a6f8c29" exitCode=0 Nov 28 15:41:47 crc kubenswrapper[4884]: I1128 15:41:47.283480 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t66fw" event={"ID":"f5493d26-8edb-4f78-8e5b-bd65d9490900","Type":"ContainerDied","Data":"2ac986d672399f8208eab374707fb3a20c8d05e42870714340327e102a6f8c29"} Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.293488 4884 generic.go:334] "Generic (PLEG): container finished" podID="3088250d-24c6-4378-9ab6-67e4244567eb" containerID="edd37f72384b03eb5519315a8e0249d6ae9ad1a1807125baf938035d6cdfba74" exitCode=0 Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.293540 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r4pfq" event={"ID":"3088250d-24c6-4378-9ab6-67e4244567eb","Type":"ContainerDied","Data":"edd37f72384b03eb5519315a8e0249d6ae9ad1a1807125baf938035d6cdfba74"} Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.621766 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.728483 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4fq2\" (UniqueName: \"kubernetes.io/projected/f5493d26-8edb-4f78-8e5b-bd65d9490900-kube-api-access-z4fq2\") pod \"f5493d26-8edb-4f78-8e5b-bd65d9490900\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.728706 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-combined-ca-bundle\") pod \"f5493d26-8edb-4f78-8e5b-bd65d9490900\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.729065 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-config\") pod \"f5493d26-8edb-4f78-8e5b-bd65d9490900\" (UID: \"f5493d26-8edb-4f78-8e5b-bd65d9490900\") " Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.735711 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5493d26-8edb-4f78-8e5b-bd65d9490900-kube-api-access-z4fq2" (OuterVolumeSpecName: "kube-api-access-z4fq2") pod "f5493d26-8edb-4f78-8e5b-bd65d9490900" (UID: "f5493d26-8edb-4f78-8e5b-bd65d9490900"). InnerVolumeSpecName "kube-api-access-z4fq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.758056 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5493d26-8edb-4f78-8e5b-bd65d9490900" (UID: "f5493d26-8edb-4f78-8e5b-bd65d9490900"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.770214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-config" (OuterVolumeSpecName: "config") pod "f5493d26-8edb-4f78-8e5b-bd65d9490900" (UID: "f5493d26-8edb-4f78-8e5b-bd65d9490900"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.831154 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.831544 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5493d26-8edb-4f78-8e5b-bd65d9490900-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:48 crc kubenswrapper[4884]: I1128 15:41:48.831622 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4fq2\" (UniqueName: \"kubernetes.io/projected/f5493d26-8edb-4f78-8e5b-bd65d9490900-kube-api-access-z4fq2\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.304510 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-t66fw" event={"ID":"f5493d26-8edb-4f78-8e5b-bd65d9490900","Type":"ContainerDied","Data":"6ca1f19366fb3b49895c362c3f1d19b18ac375bf1a6e37003974a4f79a003b9e"} Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.304547 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ca1f19366fb3b49895c362c3f1d19b18ac375bf1a6e37003974a4f79a003b9e" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.304555 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-t66fw" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.544007 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-sxhl6"] Nov 28 15:41:49 crc kubenswrapper[4884]: E1128 15:41:49.545042 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5493d26-8edb-4f78-8e5b-bd65d9490900" containerName="neutron-db-sync" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.545064 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5493d26-8edb-4f78-8e5b-bd65d9490900" containerName="neutron-db-sync" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.546404 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5493d26-8edb-4f78-8e5b-bd65d9490900" containerName="neutron-db-sync" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.547444 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.554366 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-sxhl6"] Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.633405 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-657c567946-l5n4g"] Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.635187 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.637573 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.637817 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.637945 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-cjrtl" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.638170 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653038 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653112 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-657c567946-l5n4g"] Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653153 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlkr2\" (UniqueName: \"kubernetes.io/projected/2b722fdc-566f-4f41-a70d-2d50a51e1b16-kube-api-access-hlkr2\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653244 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-config\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.653339 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755407 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-config\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755457 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-combined-ca-bundle\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-config\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755544 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755572 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755590 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhpmp\" (UniqueName: \"kubernetes.io/projected/300c7172-b015-47a7-b3cd-52b5e67fdf64-kube-api-access-xhpmp\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755611 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755641 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-httpd-config\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755664 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755690 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-ovndb-tls-certs\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.755707 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlkr2\" (UniqueName: \"kubernetes.io/projected/2b722fdc-566f-4f41-a70d-2d50a51e1b16-kube-api-access-hlkr2\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.757038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-config\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.757574 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.758126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.758620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.759443 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.773892 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlkr2\" (UniqueName: \"kubernetes.io/projected/2b722fdc-566f-4f41-a70d-2d50a51e1b16-kube-api-access-hlkr2\") pod \"dnsmasq-dns-5ccc5c4795-sxhl6\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.857483 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhpmp\" (UniqueName: \"kubernetes.io/projected/300c7172-b015-47a7-b3cd-52b5e67fdf64-kube-api-access-xhpmp\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.858690 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-httpd-config\") pod 
\"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.858812 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-ovndb-tls-certs\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.858968 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-config\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.859035 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-combined-ca-bundle\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.863287 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-combined-ca-bundle\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.863977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-config\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.864584 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-ovndb-tls-certs\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.867696 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-httpd-config\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.891322 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.891811 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhpmp\" (UniqueName: \"kubernetes.io/projected/300c7172-b015-47a7-b3cd-52b5e67fdf64-kube-api-access-xhpmp\") pod \"neutron-657c567946-l5n4g\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") " pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:49 crc kubenswrapper[4884]: I1128 15:41:49.974579 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.243505 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.243554 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.801968 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-67dd7bd66f-2ff2l"] Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.807917 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.809947 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.809946 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.812887 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67dd7bd66f-2ff2l"] Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922718 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct6rx\" (UniqueName: \"kubernetes.io/projected/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-kube-api-access-ct6rx\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922788 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-combined-ca-bundle\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922808 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-ovndb-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922835 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-public-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922856 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-config\") pod 
\"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922928 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-httpd-config\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:51 crc kubenswrapper[4884]: I1128 15:41:51.922992 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-internal-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.025592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-public-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.025666 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-config\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.025704 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-httpd-config\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.025829 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-internal-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.026035 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct6rx\" (UniqueName: \"kubernetes.io/projected/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-kube-api-access-ct6rx\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.026147 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-combined-ca-bundle\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.026174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-ovndb-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " 
pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.032832 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-config\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.032988 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-ovndb-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.034318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-internal-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.035888 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-combined-ca-bundle\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.038483 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-httpd-config\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.039044 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-public-tls-certs\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.049168 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct6rx\" (UniqueName: \"kubernetes.io/projected/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-kube-api-access-ct6rx\") pod \"neutron-67dd7bd66f-2ff2l\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:52 crc kubenswrapper[4884]: I1128 15:41:52.130756 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.013464 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.064633 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-db-sync-config-data\") pod \"3088250d-24c6-4378-9ab6-67e4244567eb\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.064758 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjgtj\" (UniqueName: \"kubernetes.io/projected/3088250d-24c6-4378-9ab6-67e4244567eb-kube-api-access-bjgtj\") pod \"3088250d-24c6-4378-9ab6-67e4244567eb\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.064787 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-combined-ca-bundle\") pod \"3088250d-24c6-4378-9ab6-67e4244567eb\" (UID: \"3088250d-24c6-4378-9ab6-67e4244567eb\") " Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.070470 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3088250d-24c6-4378-9ab6-67e4244567eb-kube-api-access-bjgtj" (OuterVolumeSpecName: "kube-api-access-bjgtj") pod "3088250d-24c6-4378-9ab6-67e4244567eb" (UID: "3088250d-24c6-4378-9ab6-67e4244567eb"). InnerVolumeSpecName "kube-api-access-bjgtj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.071830 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3088250d-24c6-4378-9ab6-67e4244567eb" (UID: "3088250d-24c6-4378-9ab6-67e4244567eb"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.100040 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3088250d-24c6-4378-9ab6-67e4244567eb" (UID: "3088250d-24c6-4378-9ab6-67e4244567eb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.167476 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.167521 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjgtj\" (UniqueName: \"kubernetes.io/projected/3088250d-24c6-4378-9ab6-67e4244567eb-kube-api-access-bjgtj\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.167536 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3088250d-24c6-4378-9ab6-67e4244567eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.367006 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r4pfq" event={"ID":"3088250d-24c6-4378-9ab6-67e4244567eb","Type":"ContainerDied","Data":"d2e3eb797574687001ab4929e81e86597266e5a6a8251f286401aa1354532a47"} Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.367363 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2e3eb797574687001ab4929e81e86597266e5a6a8251f286401aa1354532a47" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.367037 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-r4pfq" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.369351 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerStarted","Data":"554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0"} Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.369547 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.369577 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="proxy-httpd" containerID="cri-o://554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0" gracePeriod=30 Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.369588 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-central-agent" containerID="cri-o://367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39" gracePeriod=30 Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.369718 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="sg-core" containerID="cri-o://30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6" gracePeriod=30 Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.369822 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-notification-agent" containerID="cri-o://4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8" gracePeriod=30 Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.370887 4884 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-sxhl6"] Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.397731 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.905130361 podStartE2EDuration="48.397715636s" podCreationTimestamp="2025-11-28 15:41:06 +0000 UTC" firstStartedPulling="2025-11-28 15:41:10.393443246 +0000 UTC m=+1309.956227047" lastFinishedPulling="2025-11-28 15:41:53.886028511 +0000 UTC m=+1353.448812322" observedRunningTime="2025-11-28 15:41:54.392388936 +0000 UTC m=+1353.955172737" watchObservedRunningTime="2025-11-28 15:41:54.397715636 +0000 UTC m=+1353.960499437" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.429817 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.457371 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-67dd7bd66f-2ff2l"] Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.536944 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-657c567946-l5n4g"] Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.551227 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.552696 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.552730 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.553086 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.675746 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:54 crc kubenswrapper[4884]: I1128 15:41:54.706018 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.204852 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-654d667c99-rmxwg"] Nov 28 15:41:55 crc kubenswrapper[4884]: E1128 15:41:55.205390 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3088250d-24c6-4378-9ab6-67e4244567eb" containerName="barbican-db-sync" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.205401 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3088250d-24c6-4378-9ab6-67e4244567eb" containerName="barbican-db-sync" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.205579 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3088250d-24c6-4378-9ab6-67e4244567eb" containerName="barbican-db-sync" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.206398 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.218922 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.219078 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.219209 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-hkn9p" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.228750 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-654d667c99-rmxwg"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.268266 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-75d9f978b8-b8bls"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.269715 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.273435 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.278705 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-75d9f978b8-b8bls"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.292467 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data-custom\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.292726 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-combined-ca-bundle\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.292881 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afabf28f-eb82-4439-aa4f-3154e1007bf5-logs\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.292962 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gd2sd\" (UniqueName: \"kubernetes.io/projected/afabf28f-eb82-4439-aa4f-3154e1007bf5-kube-api-access-gd2sd\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.293072 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " 
pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.319345 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-sxhl6"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.343083 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7h82d"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.344544 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.352288 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7h82d"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.383932 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-657c567946-l5n4g" event={"ID":"300c7172-b015-47a7-b3cd-52b5e67fdf64","Type":"ContainerStarted","Data":"1b21629c9f6a6e80ef6dd9b05cf79286957a70256a474418362e2ecb678ee394"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.383976 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-657c567946-l5n4g" event={"ID":"300c7172-b015-47a7-b3cd-52b5e67fdf64","Type":"ContainerStarted","Data":"f9802f280f0524d6a7af18d87d664c767e19efd621585320045209b91d143e3d"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.383985 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-657c567946-l5n4g" event={"ID":"300c7172-b015-47a7-b3cd-52b5e67fdf64","Type":"ContainerStarted","Data":"bde0abbf23b9fb0c53261fd85ff2eacf8d488c5a15cbb24f2a8f83a61d7faee4"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.385296 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.390598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dd7bd66f-2ff2l" event={"ID":"458e9b5e-8446-4bfa-ba33-12a3a32c74ea","Type":"ContainerStarted","Data":"6058d7f72df016355acac3c48d8e08bd8294db35efcb91a326adb1708153048c"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.390648 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dd7bd66f-2ff2l" event={"ID":"458e9b5e-8446-4bfa-ba33-12a3a32c74ea","Type":"ContainerStarted","Data":"f5481c335b4d3298f0e98cb830ee1e1cade042a57387db9cfad9455b405cd581"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.390659 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dd7bd66f-2ff2l" event={"ID":"458e9b5e-8446-4bfa-ba33-12a3a32c74ea","Type":"ContainerStarted","Data":"7cc9183f634ad8d78ec2fd6ec92a6c26b6b223f306419a67456fcc01d8397755"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.390919 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.396311 4884 generic.go:334] "Generic (PLEG): container finished" podID="2b722fdc-566f-4f41-a70d-2d50a51e1b16" containerID="1ed4da5ca2784ad18d7ac179c6bd97cca4c984efccfcce500f84ecafa684d052" exitCode=0 Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.396384 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" event={"ID":"2b722fdc-566f-4f41-a70d-2d50a51e1b16","Type":"ContainerDied","Data":"1ed4da5ca2784ad18d7ac179c6bd97cca4c984efccfcce500f84ecafa684d052"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 
15:41:55.396411 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" event={"ID":"2b722fdc-566f-4f41-a70d-2d50a51e1b16","Type":"ContainerStarted","Data":"148c7648f915574b53ed6cd04a698eb671dcd2418dd8d57b3ccdf56fddbcec39"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.397994 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data-custom\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398040 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afabf28f-eb82-4439-aa4f-3154e1007bf5-logs\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gd2sd\" (UniqueName: \"kubernetes.io/projected/afabf28f-eb82-4439-aa4f-3154e1007bf5-kube-api-access-gd2sd\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398107 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398138 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g7jl\" (UniqueName: \"kubernetes.io/projected/c29b378e-a008-4903-9ebd-2570d37d8a11-kube-api-access-4g7jl\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398182 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-combined-ca-bundle\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data-custom\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398273 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " 
pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398290 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29b378e-a008-4903-9ebd-2570d37d8a11-logs\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.398308 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-combined-ca-bundle\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.399658 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afabf28f-eb82-4439-aa4f-3154e1007bf5-logs\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.403484 4884 generic.go:334] "Generic (PLEG): container finished" podID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerID="554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0" exitCode=0 Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.403514 4884 generic.go:334] "Generic (PLEG): container finished" podID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerID="30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6" exitCode=2 Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.403600 4884 generic.go:334] "Generic (PLEG): container finished" podID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerID="367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39" exitCode=0 Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.406108 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerDied","Data":"554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.406136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerDied","Data":"30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.406237 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerDied","Data":"367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39"} Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.408053 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.408858 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-combined-ca-bundle\") pod 
\"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.439115 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data-custom\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.456972 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gd2sd\" (UniqueName: \"kubernetes.io/projected/afabf28f-eb82-4439-aa4f-3154e1007bf5-kube-api-access-gd2sd\") pod \"barbican-worker-654d667c99-rmxwg\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.485512 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-657c567946-l5n4g" podStartSLOduration=6.485491184 podStartE2EDuration="6.485491184s" podCreationTimestamp="2025-11-28 15:41:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:55.444616646 +0000 UTC m=+1355.007400447" watchObservedRunningTime="2025-11-28 15:41:55.485491184 +0000 UTC m=+1355.048274985" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.518451 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7cc8c9b9fd-zqcg4"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.521678 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.530571 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29b378e-a008-4903-9ebd-2570d37d8a11-logs\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.530635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-svc\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.530712 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data-custom\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.530828 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g7jl\" (UniqueName: \"kubernetes.io/projected/c29b378e-a008-4903-9ebd-2570d37d8a11-kube-api-access-4g7jl\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 
15:41:55.530859 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.530909 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.530982 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-combined-ca-bundle\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.531020 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.531041 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-config\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.531066 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czvzn\" (UniqueName: \"kubernetes.io/projected/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-kube-api-access-czvzn\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.531132 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.531928 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.587801 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-combined-ca-bundle\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.588595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29b378e-a008-4903-9ebd-2570d37d8a11-logs\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.590271 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.599642 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.610143 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7cc8c9b9fd-zqcg4"] Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.618652 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-67dd7bd66f-2ff2l" podStartSLOduration=4.618630479 podStartE2EDuration="4.618630479s" podCreationTimestamp="2025-11-28 15:41:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:55.590717579 +0000 UTC m=+1355.153501390" watchObservedRunningTime="2025-11-28 15:41:55.618630479 +0000 UTC m=+1355.181414280" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.635084 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.635150 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.635194 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.635209 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-config\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.635235 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czvzn\" (UniqueName: \"kubernetes.io/projected/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-kube-api-access-czvzn\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.635317 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-svc\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.637749 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.640942 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data-custom\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.642115 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g7jl\" (UniqueName: \"kubernetes.io/projected/c29b378e-a008-4903-9ebd-2570d37d8a11-kube-api-access-4g7jl\") pod \"barbican-keystone-listener-75d9f978b8-b8bls\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.642963 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-config\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.643459 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-svc\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.645019 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.657518 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.702721 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czvzn\" (UniqueName: \"kubernetes.io/projected/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-kube-api-access-czvzn\") pod \"dnsmasq-dns-688c87cc99-7h82d\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.743942 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp6dg\" (UniqueName: \"kubernetes.io/projected/56aa1c8e-ed74-42bf-8556-965f37e377b3-kube-api-access-wp6dg\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.744102 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-combined-ca-bundle\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.744206 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.744258 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data-custom\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.744418 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56aa1c8e-ed74-42bf-8556-965f37e377b3-logs\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.849822 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp6dg\" (UniqueName: \"kubernetes.io/projected/56aa1c8e-ed74-42bf-8556-965f37e377b3-kube-api-access-wp6dg\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.850267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-combined-ca-bundle\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.850324 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.850363 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data-custom\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.850395 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56aa1c8e-ed74-42bf-8556-965f37e377b3-logs\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.850881 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56aa1c8e-ed74-42bf-8556-965f37e377b3-logs\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.859273 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.859724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data-custom\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.867428 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-combined-ca-bundle\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.874808 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp6dg\" (UniqueName: \"kubernetes.io/projected/56aa1c8e-ed74-42bf-8556-965f37e377b3-kube-api-access-wp6dg\") pod \"barbican-api-7cc8c9b9fd-zqcg4\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.891507 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.969224 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:55 crc kubenswrapper[4884]: I1128 15:41:55.976976 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7h82d"
Nov 28 15:41:56 crc kubenswrapper[4884]: E1128 15:41:56.068222 4884 log.go:32] "CreateContainer in sandbox from runtime service failed" err=<
Nov 28 15:41:56 crc kubenswrapper[4884]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/2b722fdc-566f-4f41-a70d-2d50a51e1b16/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Nov 28 15:41:56 crc kubenswrapper[4884]: > podSandboxID="148c7648f915574b53ed6cd04a698eb671dcd2418dd8d57b3ccdf56fddbcec39"
Nov 28 15:41:56 crc kubenswrapper[4884]: E1128 15:41:56.068383 4884 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 28 15:41:56 crc kubenswrapper[4884]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5dchc4h67h57hb6h78hcdh7dh96h697h5d6h5ddh58h68ch575h574h686h56h578h5d6h8bh699h7chchcbhdfhb9h5d8h88h554h57h564q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hlkr2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc5c4795-sxhl6_openstack(2b722fdc-566f-4f41-a70d-2d50a51e1b16): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/2b722fdc-566f-4f41-a70d-2d50a51e1b16/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Nov 28 15:41:56 crc kubenswrapper[4884]: > logger="UnhandledError"
Nov 28 15:41:56 crc kubenswrapper[4884]: E1128 15:41:56.069752 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/2b722fdc-566f-4f41-a70d-2d50a51e1b16/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" podUID="2b722fdc-566f-4f41-a70d-2d50a51e1b16"
Nov 28 15:41:56 crc kubenswrapper[4884]: I1128 15:41:56.190437 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-654d667c99-rmxwg"]
Nov 28 15:41:56 crc kubenswrapper[4884]: W1128 15:41:56.194969 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podafabf28f_eb82_4439_aa4f_3154e1007bf5.slice/crio-810f27cfb84ea05c5b82665c8896f872a37c09460e1fd5324761989fdf2c4ae4 WatchSource:0}: Error finding container 810f27cfb84ea05c5b82665c8896f872a37c09460e1fd5324761989fdf2c4ae4: Status 404 returned error can't find the container with id 810f27cfb84ea05c5b82665c8896f872a37c09460e1fd5324761989fdf2c4ae4
Nov 28 15:41:56 crc kubenswrapper[4884]: I1128 15:41:56.418181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654d667c99-rmxwg" event={"ID":"afabf28f-eb82-4439-aa4f-3154e1007bf5","Type":"ContainerStarted","Data":"810f27cfb84ea05c5b82665c8896f872a37c09460e1fd5324761989fdf2c4ae4"}
Nov 28 15:41:56 crc kubenswrapper[4884]: I1128 15:41:56.499712 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-75d9f978b8-b8bls"]
Nov 28 15:41:56 crc kubenswrapper[4884]: I1128 15:41:56.550179 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7cc8c9b9fd-zqcg4"]
Nov 28 15:41:56 crc kubenswrapper[4884]: W1128 15:41:56.559704 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56aa1c8e_ed74_42bf_8556_965f37e377b3.slice/crio-5e4c04f2ffef7385cba95f8e766321e4a84a98d4a1f1b41106e3b8ac536bdf25 WatchSource:0}: Error finding container 5e4c04f2ffef7385cba95f8e766321e4a84a98d4a1f1b41106e3b8ac536bdf25: Status 404 returned error can't find the container with id
5e4c04f2ffef7385cba95f8e766321e4a84a98d4a1f1b41106e3b8ac536bdf25 Nov 28 15:41:56 crc kubenswrapper[4884]: I1128 15:41:56.638040 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7h82d"] Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.446772 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" event={"ID":"2b722fdc-566f-4f41-a70d-2d50a51e1b16","Type":"ContainerDied","Data":"148c7648f915574b53ed6cd04a698eb671dcd2418dd8d57b3ccdf56fddbcec39"} Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.447107 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="148c7648f915574b53ed6cd04a698eb671dcd2418dd8d57b3ccdf56fddbcec39" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.448048 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" event={"ID":"c29b378e-a008-4903-9ebd-2570d37d8a11","Type":"ContainerStarted","Data":"d0afc34282eeb448655090d53a5d3b043c457c04224d33cd424ddbef17ae8b3d"} Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.448933 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" event={"ID":"56aa1c8e-ed74-42bf-8556-965f37e377b3","Type":"ContainerStarted","Data":"5e4c04f2ffef7385cba95f8e766321e4a84a98d4a1f1b41106e3b8ac536bdf25"} Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.450555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" event={"ID":"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a","Type":"ContainerStarted","Data":"f91d7fb5c8f52d0f11988f85a6b0df1fb303512e02341fee3a4e92734fc7faa6"} Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.652927 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.693578 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-sb\") pod \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.693725 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlkr2\" (UniqueName: \"kubernetes.io/projected/2b722fdc-566f-4f41-a70d-2d50a51e1b16-kube-api-access-hlkr2\") pod \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.693897 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-svc\") pod \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.694058 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-nb\") pod \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.694294 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-config\") pod \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.694395 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-swift-storage-0\") pod \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\" (UID: \"2b722fdc-566f-4f41-a70d-2d50a51e1b16\") " Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.706465 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b722fdc-566f-4f41-a70d-2d50a51e1b16-kube-api-access-hlkr2" (OuterVolumeSpecName: "kube-api-access-hlkr2") pod "2b722fdc-566f-4f41-a70d-2d50a51e1b16" (UID: "2b722fdc-566f-4f41-a70d-2d50a51e1b16"). InnerVolumeSpecName "kube-api-access-hlkr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.749670 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2b722fdc-566f-4f41-a70d-2d50a51e1b16" (UID: "2b722fdc-566f-4f41-a70d-2d50a51e1b16"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.755364 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2b722fdc-566f-4f41-a70d-2d50a51e1b16" (UID: "2b722fdc-566f-4f41-a70d-2d50a51e1b16"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.776400 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2b722fdc-566f-4f41-a70d-2d50a51e1b16" (UID: "2b722fdc-566f-4f41-a70d-2d50a51e1b16"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.800321 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlkr2\" (UniqueName: \"kubernetes.io/projected/2b722fdc-566f-4f41-a70d-2d50a51e1b16-kube-api-access-hlkr2\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.800346 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.800355 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.800364 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.800391 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2b722fdc-566f-4f41-a70d-2d50a51e1b16" (UID: "2b722fdc-566f-4f41-a70d-2d50a51e1b16"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.802376 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-config" (OuterVolumeSpecName: "config") pod "2b722fdc-566f-4f41-a70d-2d50a51e1b16" (UID: "2b722fdc-566f-4f41-a70d-2d50a51e1b16"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.902415 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:57 crc kubenswrapper[4884]: I1128 15:41:57.902436 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2b722fdc-566f-4f41-a70d-2d50a51e1b16-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.038166 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.038594 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.046206 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.334011 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.412511 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-sg-core-conf-yaml\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.412892 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-combined-ca-bundle\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.412920 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mcqh\" (UniqueName: \"kubernetes.io/projected/73b50da4-4218-4886-b687-1bcd1bd3c6b5-kube-api-access-2mcqh\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.412957 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-config-data\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.413003 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-scripts\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.413043 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-run-httpd\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.413067 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-log-httpd\") pod \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\" (UID: \"73b50da4-4218-4886-b687-1bcd1bd3c6b5\") " Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.413799 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.414042 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.426920 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-scripts" (OuterVolumeSpecName: "scripts") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.430319 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73b50da4-4218-4886-b687-1bcd1bd3c6b5-kube-api-access-2mcqh" (OuterVolumeSpecName: "kube-api-access-2mcqh") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "kube-api-access-2mcqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.456290 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.461501 4884 generic.go:334] "Generic (PLEG): container finished" podID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerID="3dc1b748efa35e876aa39c3b83376d72dddabbff27a394ab1563c9ec9fc73d4a" exitCode=0 Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.461555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" event={"ID":"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a","Type":"ContainerDied","Data":"3dc1b748efa35e876aa39c3b83376d72dddabbff27a394ab1563c9ec9fc73d4a"} Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.471874 4884 generic.go:334] "Generic (PLEG): container finished" podID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerID="4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8" exitCode=0 Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.471927 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerDied","Data":"4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8"} Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.471951 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"73b50da4-4218-4886-b687-1bcd1bd3c6b5","Type":"ContainerDied","Data":"1ca8f5336b4b72a678478b6c246fefa6295971b4eb7fc8c621e9914c69e9840d"} Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.471967 4884 scope.go:117] "RemoveContainer" containerID="554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.472083 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.499663 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-sxhl6" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.500024 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" event={"ID":"56aa1c8e-ed74-42bf-8556-965f37e377b3","Type":"ContainerStarted","Data":"3f5da2b67edbeba186939a6c7b896f0e81ce009bf1392ad45ba9fd37e154916b"} Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.502217 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" event={"ID":"56aa1c8e-ed74-42bf-8556-965f37e377b3","Type":"ContainerStarted","Data":"cac4ad84ecf6419d28c8c00201ea020c4f4d4f1a1a5c60443d2e5da27ea51d0f"} Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.502286 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.502371 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.517944 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.532319 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.532372 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.532386 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mcqh\" (UniqueName: \"kubernetes.io/projected/73b50da4-4218-4886-b687-1bcd1bd3c6b5-kube-api-access-2mcqh\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.532400 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.532478 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.532491 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/73b50da4-4218-4886-b687-1bcd1bd3c6b5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.563343 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podStartSLOduration=3.563314814 podStartE2EDuration="3.563314814s" podCreationTimestamp="2025-11-28 15:41:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:41:58.538611872 +0000 UTC m=+1358.101395673" watchObservedRunningTime="2025-11-28 15:41:58.563314814 +0000 UTC m=+1358.126098615" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.593568 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-config-data" (OuterVolumeSpecName: "config-data") pod "73b50da4-4218-4886-b687-1bcd1bd3c6b5" (UID: "73b50da4-4218-4886-b687-1bcd1bd3c6b5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.599635 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-sxhl6"] Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.605585 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-sxhl6"] Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.634075 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73b50da4-4218-4886-b687-1bcd1bd3c6b5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.706890 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b722fdc-566f-4f41-a70d-2d50a51e1b16" path="/var/lib/kubelet/pods/2b722fdc-566f-4f41-a70d-2d50a51e1b16/volumes" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.853138 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.867984 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.875416 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:58 crc kubenswrapper[4884]: E1128 15:41:58.875952 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-central-agent" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.875970 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-central-agent" Nov 28 15:41:58 crc kubenswrapper[4884]: E1128 15:41:58.875990 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="sg-core" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.875998 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="sg-core" Nov 28 15:41:58 crc kubenswrapper[4884]: E1128 15:41:58.876021 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b722fdc-566f-4f41-a70d-2d50a51e1b16" containerName="init" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876030 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b722fdc-566f-4f41-a70d-2d50a51e1b16" containerName="init" Nov 28 15:41:58 crc kubenswrapper[4884]: E1128 15:41:58.876043 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-notification-agent" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876050 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-notification-agent" Nov 28 15:41:58 crc kubenswrapper[4884]: E1128 15:41:58.876059 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="proxy-httpd" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876065 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="proxy-httpd" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876337 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="sg-core" Nov 28 15:41:58 crc kubenswrapper[4884]: 
I1128 15:41:58.876353 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b722fdc-566f-4f41-a70d-2d50a51e1b16" containerName="init" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876368 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-central-agent" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876376 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="proxy-httpd" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.876399 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" containerName="ceilometer-notification-agent" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.878082 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.881179 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.881606 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:41:58 crc kubenswrapper[4884]: I1128 15:41:58.885050 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.041678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-scripts\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.042224 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-run-httpd\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.043086 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.043173 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.043355 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-config-data\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.043402 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-log-httpd\") pod 
\"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.043420 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjhjt\" (UniqueName: \"kubernetes.io/projected/7fd62bec-641b-49b7-b81b-9f6e127d36c5-kube-api-access-fjhjt\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145000 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-scripts\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145560 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-run-httpd\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145602 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145686 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-config-data\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145709 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-log-httpd\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.145726 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjhjt\" (UniqueName: \"kubernetes.io/projected/7fd62bec-641b-49b7-b81b-9f6e127d36c5-kube-api-access-fjhjt\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.146468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-log-httpd\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.146791 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-run-httpd\") pod 
\"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.150038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-config-data\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.150931 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.151346 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.152013 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-scripts\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.173681 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjhjt\" (UniqueName: \"kubernetes.io/projected/7fd62bec-641b-49b7-b81b-9f6e127d36c5-kube-api-access-fjhjt\") pod \"ceilometer-0\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.197310 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.199695 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-69c66bbb4b-wzn9n"] Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.201514 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.205244 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.209216 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.213457 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-69c66bbb4b-wzn9n"] Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.269623 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c9a390-4563-4e1e-a109-ff673e664409-logs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.269687 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-public-tls-certs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.269724 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cb96\" (UniqueName: \"kubernetes.io/projected/49c9a390-4563-4e1e-a109-ff673e664409-kube-api-access-8cb96\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.269817 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-combined-ca-bundle\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.270020 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.270133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-internal-tls-certs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.270170 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data-custom\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.371896 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-combined-ca-bundle\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372179 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372286 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-internal-tls-certs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372436 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data-custom\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372565 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c9a390-4563-4e1e-a109-ff673e664409-logs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-public-tls-certs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372890 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cb96\" (UniqueName: \"kubernetes.io/projected/49c9a390-4563-4e1e-a109-ff673e664409-kube-api-access-8cb96\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.372856 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c9a390-4563-4e1e-a109-ff673e664409-logs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.379064 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-combined-ca-bundle\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.379552 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.379951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-public-tls-certs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.380157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data-custom\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.386053 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-internal-tls-certs\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.387333 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cb96\" (UniqueName: \"kubernetes.io/projected/49c9a390-4563-4e1e-a109-ff673e664409-kube-api-access-8cb96\") pod \"barbican-api-69c66bbb4b-wzn9n\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.588753 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.646740 4884 scope.go:117] "RemoveContainer" containerID="30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.687716 4884 scope.go:117] "RemoveContainer" containerID="4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.715009 4884 scope.go:117] "RemoveContainer" containerID="367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.792593 4884 scope.go:117] "RemoveContainer" containerID="554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0" Nov 28 15:41:59 crc kubenswrapper[4884]: E1128 15:41:59.795234 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0\": container with ID starting with 554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0 not found: ID does not exist" containerID="554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.795268 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0"} err="failed to get container status \"554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0\": rpc error: code = NotFound desc = could not find container \"554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0\": container with ID starting with 554c41731de0660f0bef1950ab62b053b611ea009b89b19919a3b0890d4e0ad0 not found: ID does not exist" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.795293 4884 scope.go:117] "RemoveContainer" containerID="30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6" Nov 28 15:41:59 crc kubenswrapper[4884]: E1128 15:41:59.801878 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6\": container with ID starting with 30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6 not found: ID does not exist" containerID="30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.801939 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6"} err="failed to get container status \"30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6\": rpc error: code = NotFound desc = could not find container \"30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6\": container with ID starting with 30dbaf883923d897a73be8d533efa10fda7fdffdddede48bc5e220ffb80a4bb6 not found: ID does not exist" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.801971 4884 scope.go:117] "RemoveContainer" containerID="4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8" Nov 28 15:41:59 crc kubenswrapper[4884]: E1128 15:41:59.802839 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8\": container with ID starting with 
4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8 not found: ID does not exist" containerID="4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.802864 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8"} err="failed to get container status \"4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8\": rpc error: code = NotFound desc = could not find container \"4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8\": container with ID starting with 4f4c473c747b06c643e12625239d22f2985decd229d0fa6cc4cb14a44bba68a8 not found: ID does not exist" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.802894 4884 scope.go:117] "RemoveContainer" containerID="367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39" Nov 28 15:41:59 crc kubenswrapper[4884]: E1128 15:41:59.803460 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39\": container with ID starting with 367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39 not found: ID does not exist" containerID="367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39" Nov 28 15:41:59 crc kubenswrapper[4884]: I1128 15:41:59.803486 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39"} err="failed to get container status \"367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39\": rpc error: code = NotFound desc = could not find container \"367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39\": container with ID starting with 367f473dd652e36711198a792699add8c84cf57dd772516fd57a424295253d39 not found: ID does not exist" Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.226457 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.538121 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654d667c99-rmxwg" event={"ID":"afabf28f-eb82-4439-aa4f-3154e1007bf5","Type":"ContainerStarted","Data":"0ac907a0c0af6ead7b7e99a49821773ab7d22cc68e8accca85a5d26f4aca83ec"} Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.543127 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-69c66bbb4b-wzn9n"] Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.548213 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" event={"ID":"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a","Type":"ContainerStarted","Data":"bbcdc8a6448f7e66d3e117bb173765f368c6f842b4073e94576bd04ba80b446f"} Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.548413 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.554536 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerStarted","Data":"28ad68ceff5f0f967731e7bf8cc9040edd0e4ee6d744fecf560bed0140bd90bf"} Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.557628 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" event={"ID":"c29b378e-a008-4903-9ebd-2570d37d8a11","Type":"ContainerStarted","Data":"55d3b2d4ca658ad8c1510b29e4414ddf3fb3b590d3f93d47c782a243e50ec135"} Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.574888 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" podStartSLOduration=5.574872721 podStartE2EDuration="5.574872721s" podCreationTimestamp="2025-11-28 15:41:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:00.57480507 +0000 UTC m=+1360.137588861" watchObservedRunningTime="2025-11-28 15:42:00.574872721 +0000 UTC m=+1360.137656512" Nov 28 15:42:00 crc kubenswrapper[4884]: I1128 15:42:00.702743 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73b50da4-4218-4886-b687-1bcd1bd3c6b5" path="/var/lib/kubelet/pods/73b50da4-4218-4886-b687-1bcd1bd3c6b5/volumes" Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.567405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" event={"ID":"c29b378e-a008-4903-9ebd-2570d37d8a11","Type":"ContainerStarted","Data":"b5f34154cfef87b816d78795660796edfca1950b74e2cd3c82c08584957201a9"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.569167 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bvnxp" event={"ID":"b9823f75-8df0-467c-af91-ad863667138b","Type":"ContainerStarted","Data":"96012b560b8646ae95f8d578e115efcf936c4cf2e7d77a3d610028cab2e4960e"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.570901 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654d667c99-rmxwg" event={"ID":"afabf28f-eb82-4439-aa4f-3154e1007bf5","Type":"ContainerStarted","Data":"02309734d390a1a60061c2de6a1ad950eea1ac1ad9073bbfc9869f4baf64d813"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.572406 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69c66bbb4b-wzn9n" event={"ID":"49c9a390-4563-4e1e-a109-ff673e664409","Type":"ContainerStarted","Data":"6c624f23934a2757b9d0fda39e8ae22b7e58a88676450e0d3ad4c24f2ee510f7"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.572432 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69c66bbb4b-wzn9n" event={"ID":"49c9a390-4563-4e1e-a109-ff673e664409","Type":"ContainerStarted","Data":"a13fa47dc18a9e97c12845d9bf327b2a99237952371e8e2be117fbd3fd157b73"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.572444 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69c66bbb4b-wzn9n" event={"ID":"49c9a390-4563-4e1e-a109-ff673e664409","Type":"ContainerStarted","Data":"dbd261bf5e4f95b88bd84297cbca7b56b8ee21d4e5d6972fd71eec32ed161998"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.572585 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.572603 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.573992 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerStarted","Data":"4e4cd6967466ab3a0672d9d4bc02337b1a82e6fcde1fe6e097fa23def5ed621e"} Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.594376 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" podStartSLOduration=3.409061874 podStartE2EDuration="6.594362105s" podCreationTimestamp="2025-11-28 15:41:55 +0000 UTC" firstStartedPulling="2025-11-28 15:41:56.511770702 +0000 UTC m=+1356.074554503" lastFinishedPulling="2025-11-28 15:41:59.697070933 +0000 UTC m=+1359.259854734" observedRunningTime="2025-11-28 15:42:01.589559057 +0000 UTC m=+1361.152342868" watchObservedRunningTime="2025-11-28 15:42:01.594362105 +0000 UTC m=+1361.157145906" Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.616601 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-bvnxp" podStartSLOduration=4.094958573 podStartE2EDuration="42.616584316s" podCreationTimestamp="2025-11-28 15:41:19 +0000 UTC" firstStartedPulling="2025-11-28 15:41:21.040901249 +0000 UTC m=+1320.603685050" lastFinishedPulling="2025-11-28 15:41:59.562526992 +0000 UTC m=+1359.125310793" observedRunningTime="2025-11-28 15:42:01.606807788 +0000 UTC m=+1361.169591589" watchObservedRunningTime="2025-11-28 15:42:01.616584316 +0000 UTC m=+1361.179368117" Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.640085 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-69c66bbb4b-wzn9n" podStartSLOduration=2.640065979 podStartE2EDuration="2.640065979s" podCreationTimestamp="2025-11-28 15:41:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:01.633868248 +0000 UTC m=+1361.196652059" watchObservedRunningTime="2025-11-28 15:42:01.640065979 +0000 UTC m=+1361.202849790" Nov 28 15:42:01 crc kubenswrapper[4884]: I1128 15:42:01.672775 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-654d667c99-rmxwg" podStartSLOduration=3.178078514 podStartE2EDuration="6.672750236s" podCreationTimestamp="2025-11-28 15:41:55 +0000 UTC" firstStartedPulling="2025-11-28 15:41:56.202412031 +0000 UTC m=+1355.765195832" lastFinishedPulling="2025-11-28 15:41:59.697083753 +0000 UTC m=+1359.259867554" observedRunningTime="2025-11-28 15:42:01.65529367 +0000 UTC m=+1361.218077521" watchObservedRunningTime="2025-11-28 15:42:01.672750236 +0000 UTC m=+1361.235534037" Nov 28 15:42:02 crc kubenswrapper[4884]: I1128 15:42:02.583577 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerStarted","Data":"481a33cbd27fc2f0bff62122445eb65934ae3a3b51f85ae80f7c70b42851b13e"} Nov 28 15:42:03 crc kubenswrapper[4884]: I1128 15:42:03.617315 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerStarted","Data":"15de6a2d92b593a2651de65bd3f3d4f6802de064d076d8a610b3a2adef599b99"} Nov 28 15:42:04 crc kubenswrapper[4884]: I1128 15:42:04.628591 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerStarted","Data":"e1dd94495ef2e1dc1412067054aa71f40a0bfaf391be7d01ad919cd3da77602e"} Nov 28 15:42:04 crc kubenswrapper[4884]: 
I1128 15:42:04.629982 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:42:04 crc kubenswrapper[4884]: I1128 15:42:04.658257 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.763588327 podStartE2EDuration="6.658236299s" podCreationTimestamp="2025-11-28 15:41:58 +0000 UTC" firstStartedPulling="2025-11-28 15:42:00.291645967 +0000 UTC m=+1359.854429768" lastFinishedPulling="2025-11-28 15:42:04.186293919 +0000 UTC m=+1363.749077740" observedRunningTime="2025-11-28 15:42:04.653954906 +0000 UTC m=+1364.216738707" watchObservedRunningTime="2025-11-28 15:42:04.658236299 +0000 UTC m=+1364.221020100" Nov 28 15:42:05 crc kubenswrapper[4884]: I1128 15:42:05.639057 4884 generic.go:334] "Generic (PLEG): container finished" podID="b9823f75-8df0-467c-af91-ad863667138b" containerID="96012b560b8646ae95f8d578e115efcf936c4cf2e7d77a3d610028cab2e4960e" exitCode=0 Nov 28 15:42:05 crc kubenswrapper[4884]: I1128 15:42:05.639133 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bvnxp" event={"ID":"b9823f75-8df0-467c-af91-ad863667138b","Type":"ContainerDied","Data":"96012b560b8646ae95f8d578e115efcf936c4cf2e7d77a3d610028cab2e4960e"} Nov 28 15:42:05 crc kubenswrapper[4884]: I1128 15:42:05.978737 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.036131 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-wp9nt"] Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.036405 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerName="dnsmasq-dns" containerID="cri-o://dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c" gracePeriod=10 Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.626586 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.659445 4884 generic.go:334] "Generic (PLEG): container finished" podID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerID="dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c" exitCode=0 Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.659716 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.660455 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" event={"ID":"1a186df2-95e5-4f8c-9184-0eeef5af978c","Type":"ContainerDied","Data":"dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c"} Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.660481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-wp9nt" event={"ID":"1a186df2-95e5-4f8c-9184-0eeef5af978c","Type":"ContainerDied","Data":"f8a6af4d9cf9bd80bc19d3a278e668bdeda0a20140dd18918473fa561a1622dc"} Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.660499 4884 scope.go:117] "RemoveContainer" containerID="dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.715687 4884 scope.go:117] "RemoveContainer" containerID="6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.716078 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-nb\") pod \"1a186df2-95e5-4f8c-9184-0eeef5af978c\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.716134 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-config\") pod \"1a186df2-95e5-4f8c-9184-0eeef5af978c\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.716199 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj6z9\" (UniqueName: \"kubernetes.io/projected/1a186df2-95e5-4f8c-9184-0eeef5af978c-kube-api-access-qj6z9\") pod \"1a186df2-95e5-4f8c-9184-0eeef5af978c\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.716226 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-swift-storage-0\") pod \"1a186df2-95e5-4f8c-9184-0eeef5af978c\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.716355 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-svc\") pod \"1a186df2-95e5-4f8c-9184-0eeef5af978c\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.716453 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-sb\") pod \"1a186df2-95e5-4f8c-9184-0eeef5af978c\" (UID: \"1a186df2-95e5-4f8c-9184-0eeef5af978c\") " Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.725280 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a186df2-95e5-4f8c-9184-0eeef5af978c-kube-api-access-qj6z9" (OuterVolumeSpecName: "kube-api-access-qj6z9") pod "1a186df2-95e5-4f8c-9184-0eeef5af978c" (UID: "1a186df2-95e5-4f8c-9184-0eeef5af978c"). 
InnerVolumeSpecName "kube-api-access-qj6z9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.807977 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a186df2-95e5-4f8c-9184-0eeef5af978c" (UID: "1a186df2-95e5-4f8c-9184-0eeef5af978c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.835354 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.835386 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj6z9\" (UniqueName: \"kubernetes.io/projected/1a186df2-95e5-4f8c-9184-0eeef5af978c-kube-api-access-qj6z9\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.845130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-config" (OuterVolumeSpecName: "config") pod "1a186df2-95e5-4f8c-9184-0eeef5af978c" (UID: "1a186df2-95e5-4f8c-9184-0eeef5af978c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.862281 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1a186df2-95e5-4f8c-9184-0eeef5af978c" (UID: "1a186df2-95e5-4f8c-9184-0eeef5af978c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.869348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a186df2-95e5-4f8c-9184-0eeef5af978c" (UID: "1a186df2-95e5-4f8c-9184-0eeef5af978c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.877469 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a186df2-95e5-4f8c-9184-0eeef5af978c" (UID: "1a186df2-95e5-4f8c-9184-0eeef5af978c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.936272 4884 scope.go:117] "RemoveContainer" containerID="dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.937170 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.937202 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.937211 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.937220 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a186df2-95e5-4f8c-9184-0eeef5af978c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:06 crc kubenswrapper[4884]: E1128 15:42:06.940221 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c\": container with ID starting with dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c not found: ID does not exist" containerID="dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.940270 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c"} err="failed to get container status \"dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c\": rpc error: code = NotFound desc = could not find container \"dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c\": container with ID starting with dc955babc9dfbcbde4ff6c8ca42eff0d1c407f3761b2dba91d790832d0185d8c not found: ID does not exist" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.940295 4884 scope.go:117] "RemoveContainer" containerID="6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4" Nov 28 15:42:06 crc kubenswrapper[4884]: E1128 15:42:06.940633 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4\": container with ID starting with 6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4 not found: ID does not exist" containerID="6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4" Nov 28 15:42:06 crc kubenswrapper[4884]: I1128 15:42:06.940669 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4"} err="failed to get container status \"6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4\": rpc error: code = NotFound desc = could not find container \"6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4\": container with ID starting with 6bfc0c4532dc67dc87bdc021d2d0c80f64c83cd5c053163197e402cabdfcc7c4 
not found: ID does not exist" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.047979 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.051071 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-wp9nt"] Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.062228 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-wp9nt"] Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.139910 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9823f75-8df0-467c-af91-ad863667138b-etc-machine-id\") pod \"b9823f75-8df0-467c-af91-ad863667138b\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140010 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-db-sync-config-data\") pod \"b9823f75-8df0-467c-af91-ad863667138b\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140037 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-scripts\") pod \"b9823f75-8df0-467c-af91-ad863667138b\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140000 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9823f75-8df0-467c-af91-ad863667138b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b9823f75-8df0-467c-af91-ad863667138b" (UID: "b9823f75-8df0-467c-af91-ad863667138b"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140140 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h4kt\" (UniqueName: \"kubernetes.io/projected/b9823f75-8df0-467c-af91-ad863667138b-kube-api-access-8h4kt\") pod \"b9823f75-8df0-467c-af91-ad863667138b\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140161 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-config-data\") pod \"b9823f75-8df0-467c-af91-ad863667138b\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140175 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-combined-ca-bundle\") pod \"b9823f75-8df0-467c-af91-ad863667138b\" (UID: \"b9823f75-8df0-467c-af91-ad863667138b\") " Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.140565 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b9823f75-8df0-467c-af91-ad863667138b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.144988 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b9823f75-8df0-467c-af91-ad863667138b" (UID: "b9823f75-8df0-467c-af91-ad863667138b"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.145247 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-scripts" (OuterVolumeSpecName: "scripts") pod "b9823f75-8df0-467c-af91-ad863667138b" (UID: "b9823f75-8df0-467c-af91-ad863667138b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.146205 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9823f75-8df0-467c-af91-ad863667138b-kube-api-access-8h4kt" (OuterVolumeSpecName: "kube-api-access-8h4kt") pod "b9823f75-8df0-467c-af91-ad863667138b" (UID: "b9823f75-8df0-467c-af91-ad863667138b"). InnerVolumeSpecName "kube-api-access-8h4kt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.172213 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9823f75-8df0-467c-af91-ad863667138b" (UID: "b9823f75-8df0-467c-af91-ad863667138b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.196289 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-config-data" (OuterVolumeSpecName: "config-data") pod "b9823f75-8df0-467c-af91-ad863667138b" (UID: "b9823f75-8df0-467c-af91-ad863667138b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.242281 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.242334 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.242346 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h4kt\" (UniqueName: \"kubernetes.io/projected/b9823f75-8df0-467c-af91-ad863667138b-kube-api-access-8h4kt\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.242357 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.242366 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9823f75-8df0-467c-af91-ad863667138b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.669213 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-bvnxp" event={"ID":"b9823f75-8df0-467c-af91-ad863667138b","Type":"ContainerDied","Data":"4b325eb008624336d56fde5da286985649e2d3a396ea8d4d212bca335f293d37"} Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.669263 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b325eb008624336d56fde5da286985649e2d3a396ea8d4d212bca335f293d37" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.669300 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-bvnxp" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.672907 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.864260 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.946099 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:42:07 crc kubenswrapper[4884]: E1128 15:42:07.946598 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerName="init" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.946620 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerName="init" Nov 28 15:42:07 crc kubenswrapper[4884]: E1128 15:42:07.946646 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerName="dnsmasq-dns" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.946653 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerName="dnsmasq-dns" Nov 28 15:42:07 crc kubenswrapper[4884]: E1128 15:42:07.946660 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9823f75-8df0-467c-af91-ad863667138b" containerName="cinder-db-sync" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.946666 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9823f75-8df0-467c-af91-ad863667138b" containerName="cinder-db-sync" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.947009 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" containerName="dnsmasq-dns" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.947030 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9823f75-8df0-467c-af91-ad863667138b" containerName="cinder-db-sync" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.947923 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.971379 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-24xq8" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.971762 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.972467 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 15:42:07 crc kubenswrapper[4884]: I1128 15:42:07.989114 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.009372 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.066611 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.066873 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbfdb\" (UniqueName: \"kubernetes.io/projected/2902a021-ed80-45d2-b892-d91cc119f140-kube-api-access-gbfdb\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.066933 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.066987 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.067033 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2902a021-ed80-45d2-b892-d91cc119f140-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.067063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-scripts\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.139985 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-466lz"] Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.153108 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.169197 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.169330 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.169401 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2902a021-ed80-45d2-b892-d91cc119f140-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.169446 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-scripts\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.169474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.169512 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbfdb\" (UniqueName: \"kubernetes.io/projected/2902a021-ed80-45d2-b892-d91cc119f140-kube-api-access-gbfdb\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.171307 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2902a021-ed80-45d2-b892-d91cc119f140-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.183745 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-scripts\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.190190 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.194716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.202287 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.223636 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbfdb\" (UniqueName: \"kubernetes.io/projected/2902a021-ed80-45d2-b892-d91cc119f140-kube-api-access-gbfdb\") pod \"cinder-scheduler-0\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.267208 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-466lz"] Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.272252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5plpr\" (UniqueName: \"kubernetes.io/projected/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-kube-api-access-5plpr\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.272297 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.272345 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.272374 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.272400 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.272433 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-config\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 
15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.279165 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.281164 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.282486 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.289924 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.345198 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380618 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4984d40-b18e-4392-8b77-f1155a2ae66d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380656 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9j72\" (UniqueName: \"kubernetes.io/projected/f4984d40-b18e-4392-8b77-f1155a2ae66d-kube-api-access-h9j72\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380679 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5plpr\" (UniqueName: \"kubernetes.io/projected/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-kube-api-access-5plpr\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380707 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380735 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380776 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-scripts\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380798 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380823 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380850 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380894 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4984d40-b18e-4392-8b77-f1155a2ae66d-logs\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.380939 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-config\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.381727 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-config\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.382940 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.383506 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.403534 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.421946 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5plpr\" (UniqueName: \"kubernetes.io/projected/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-kube-api-access-5plpr\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.430746 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-466lz\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.430864 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.482772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4984d40-b18e-4392-8b77-f1155a2ae66d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.483220 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9j72\" (UniqueName: \"kubernetes.io/projected/f4984d40-b18e-4392-8b77-f1155a2ae66d-kube-api-access-h9j72\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.483253 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.483313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-scripts\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.483361 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.483388 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4984d40-b18e-4392-8b77-f1155a2ae66d-logs\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.483407 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data-custom\") 
pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.484889 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4984d40-b18e-4392-8b77-f1155a2ae66d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.485644 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4984d40-b18e-4392-8b77-f1155a2ae66d-logs\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.507165 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.509075 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.509857 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.509934 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-scripts\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.535640 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9j72\" (UniqueName: \"kubernetes.io/projected/f4984d40-b18e-4392-8b77-f1155a2ae66d-kube-api-access-h9j72\") pod \"cinder-api-0\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.572312 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.707902 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a186df2-95e5-4f8c-9184-0eeef5af978c" path="/var/lib/kubelet/pods/1a186df2-95e5-4f8c-9184-0eeef5af978c/volumes" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.708632 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:42:08 crc kubenswrapper[4884]: I1128 15:42:08.962498 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.116939 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-466lz"] Nov 28 15:42:09 crc kubenswrapper[4884]: W1128 15:42:09.124229 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9242cf5_22c8_48a9_9d72_6027ba8f5fa6.slice/crio-1b44f7f3ca48e58364c0b940d876de1176ebd3892a99eec202bf5df7e33a5c6f WatchSource:0}: Error finding container 1b44f7f3ca48e58364c0b940d876de1176ebd3892a99eec202bf5df7e33a5c6f: Status 404 returned error can't find the container with id 1b44f7f3ca48e58364c0b940d876de1176ebd3892a99eec202bf5df7e33a5c6f Nov 28 15:42:09 crc kubenswrapper[4884]: W1128 15:42:09.389495 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4984d40_b18e_4392_8b77_f1155a2ae66d.slice/crio-cb5f1c5234a179873e2731b293221645bd1f0540ae34bc9343c824377ab4509c WatchSource:0}: Error finding container cb5f1c5234a179873e2731b293221645bd1f0540ae34bc9343c824377ab4509c: Status 404 returned error can't find the container with id cb5f1c5234a179873e2731b293221645bd1f0540ae34bc9343c824377ab4509c Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.403306 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.676824 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.678273 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.683651 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.683707 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.683914 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-wfkkj" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.707440 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.739361 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tll75\" (UniqueName: \"kubernetes.io/projected/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-kube-api-access-tll75\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.739875 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config-secret\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.739944 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.739987 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.741912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4984d40-b18e-4392-8b77-f1155a2ae66d","Type":"ContainerStarted","Data":"cb5f1c5234a179873e2731b293221645bd1f0540ae34bc9343c824377ab4509c"} Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.745933 4884 generic.go:334] "Generic (PLEG): container finished" podID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerID="45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637" exitCode=0 Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.746021 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" event={"ID":"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6","Type":"ContainerDied","Data":"45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637"} Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.746046 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" event={"ID":"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6","Type":"ContainerStarted","Data":"1b44f7f3ca48e58364c0b940d876de1176ebd3892a99eec202bf5df7e33a5c6f"} Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.752806 4884 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2902a021-ed80-45d2-b892-d91cc119f140","Type":"ContainerStarted","Data":"333b9b4d0ea69920066843298000988b818398f1778ba9f7a7af56fff0b74d83"} Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.840793 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config-secret\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.840837 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.840858 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.840941 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tll75\" (UniqueName: \"kubernetes.io/projected/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-kube-api-access-tll75\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.841582 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.846945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.850206 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config-secret\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:09 crc kubenswrapper[4884]: I1128 15:42:09.865363 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tll75\" (UniqueName: \"kubernetes.io/projected/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-kube-api-access-tll75\") pod \"openstackclient\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " pod="openstack/openstackclient" Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.003273 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.716588 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 15:42:10 crc kubenswrapper[4884]: W1128 15:42:10.748359 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4aa2db6_3fe4_43e0_8603_86dbd3a238e3.slice/crio-0d351d440994f94b60d9d630e567b274410b84e7afc2f278eb87a89f43d8a9fb WatchSource:0}: Error finding container 0d351d440994f94b60d9d630e567b274410b84e7afc2f278eb87a89f43d8a9fb: Status 404 returned error can't find the container with id 0d351d440994f94b60d9d630e567b274410b84e7afc2f278eb87a89f43d8a9fb Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.782764 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4984d40-b18e-4392-8b77-f1155a2ae66d","Type":"ContainerStarted","Data":"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51"} Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.805695 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3","Type":"ContainerStarted","Data":"0d351d440994f94b60d9d630e567b274410b84e7afc2f278eb87a89f43d8a9fb"} Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.812429 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" event={"ID":"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6","Type":"ContainerStarted","Data":"4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527"} Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.813199 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:10 crc kubenswrapper[4884]: I1128 15:42:10.864026 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" podStartSLOduration=2.864010066 podStartE2EDuration="2.864010066s" podCreationTimestamp="2025-11-28 15:42:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:10.854491186 +0000 UTC m=+1370.417274987" watchObservedRunningTime="2025-11-28 15:42:10.864010066 +0000 UTC m=+1370.426793867" Nov 28 15:42:11 crc kubenswrapper[4884]: I1128 15:42:11.835863 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2902a021-ed80-45d2-b892-d91cc119f140","Type":"ContainerStarted","Data":"2921d745ef3c9d0e98691f6f382d073544793b7bcfe713dbff6e153a54dc8b5d"} Nov 28 15:42:11 crc kubenswrapper[4884]: I1128 15:42:11.840570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4984d40-b18e-4392-8b77-f1155a2ae66d","Type":"ContainerStarted","Data":"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4"} Nov 28 15:42:11 crc kubenswrapper[4884]: I1128 15:42:11.840635 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 15:42:11 crc kubenswrapper[4884]: I1128 15:42:11.884048 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.884028344 podStartE2EDuration="3.884028344s" podCreationTimestamp="2025-11-28 15:42:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-28 15:42:11.869523553 +0000 UTC m=+1371.432307354" watchObservedRunningTime="2025-11-28 15:42:11.884028344 +0000 UTC m=+1371.446812145" Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.085151 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.236296 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.305448 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7cc8c9b9fd-zqcg4"] Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.305669 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api-log" containerID="cri-o://cac4ad84ecf6419d28c8c00201ea020c4f4d4f1a1a5c60443d2e5da27ea51d0f" gracePeriod=30 Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.305957 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" containerID="cri-o://3f5da2b67edbeba186939a6c7b896f0e81ce009bf1392ad45ba9fd37e154916b" gracePeriod=30 Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.324163 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF" Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.850964 4884 generic.go:334] "Generic (PLEG): container finished" podID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerID="cac4ad84ecf6419d28c8c00201ea020c4f4d4f1a1a5c60443d2e5da27ea51d0f" exitCode=143 Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.851040 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" event={"ID":"56aa1c8e-ed74-42bf-8556-965f37e377b3","Type":"ContainerDied","Data":"cac4ad84ecf6419d28c8c00201ea020c4f4d4f1a1a5c60443d2e5da27ea51d0f"} Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.853668 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2902a021-ed80-45d2-b892-d91cc119f140","Type":"ContainerStarted","Data":"2fbb23b57369971df5b8063c0b8a5728034a15e9be3876a9b9b19306bfce43cb"} Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.874265 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.717190057 podStartE2EDuration="5.874244351s" podCreationTimestamp="2025-11-28 15:42:07 +0000 UTC" firstStartedPulling="2025-11-28 15:42:09.010658098 +0000 UTC m=+1368.573441899" lastFinishedPulling="2025-11-28 15:42:10.167712392 +0000 UTC m=+1369.730496193" observedRunningTime="2025-11-28 15:42:12.868862391 +0000 UTC m=+1372.431646192" watchObservedRunningTime="2025-11-28 15:42:12.874244351 +0000 UTC m=+1372.437028152" Nov 28 15:42:12 crc kubenswrapper[4884]: I1128 15:42:12.988953 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:13 crc kubenswrapper[4884]: I1128 15:42:13.283542 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/cinder-scheduler-0" Nov 28 15:42:13 crc kubenswrapper[4884]: I1128 15:42:13.870366 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api-log" containerID="cri-o://ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51" gracePeriod=30 Nov 28 15:42:13 crc kubenswrapper[4884]: I1128 15:42:13.870444 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api" containerID="cri-o://61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4" gracePeriod=30 Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.496728 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557259 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-combined-ca-bundle\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557350 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9j72\" (UniqueName: \"kubernetes.io/projected/f4984d40-b18e-4392-8b77-f1155a2ae66d-kube-api-access-h9j72\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557416 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data-custom\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557519 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4984d40-b18e-4392-8b77-f1155a2ae66d-logs\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557562 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-scripts\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557605 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.557660 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4984d40-b18e-4392-8b77-f1155a2ae66d-etc-machine-id\") pod \"f4984d40-b18e-4392-8b77-f1155a2ae66d\" (UID: \"f4984d40-b18e-4392-8b77-f1155a2ae66d\") " Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.558154 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/f4984d40-b18e-4392-8b77-f1155a2ae66d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.559539 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4984d40-b18e-4392-8b77-f1155a2ae66d-logs" (OuterVolumeSpecName: "logs") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.571833 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-scripts" (OuterVolumeSpecName: "scripts") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.572176 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.575607 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4984d40-b18e-4392-8b77-f1155a2ae66d-kube-api-access-h9j72" (OuterVolumeSpecName: "kube-api-access-h9j72") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "kube-api-access-h9j72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.612743 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.625192 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data" (OuterVolumeSpecName: "config-data") pod "f4984d40-b18e-4392-8b77-f1155a2ae66d" (UID: "f4984d40-b18e-4392-8b77-f1155a2ae66d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.659547 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.659774 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.659860 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4984d40-b18e-4392-8b77-f1155a2ae66d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.659917 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.659977 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9j72\" (UniqueName: \"kubernetes.io/projected/f4984d40-b18e-4392-8b77-f1155a2ae66d-kube-api-access-h9j72\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.660033 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4984d40-b18e-4392-8b77-f1155a2ae66d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.660096 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4984d40-b18e-4392-8b77-f1155a2ae66d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881056 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerID="61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4" exitCode=0 Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881141 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4984d40-b18e-4392-8b77-f1155a2ae66d","Type":"ContainerDied","Data":"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4"} Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4984d40-b18e-4392-8b77-f1155a2ae66d","Type":"ContainerDied","Data":"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51"} Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881251 4884 scope.go:117] "RemoveContainer" containerID="61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881148 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerID="ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51" exitCode=143 Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.881382 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4984d40-b18e-4392-8b77-f1155a2ae66d","Type":"ContainerDied","Data":"cb5f1c5234a179873e2731b293221645bd1f0540ae34bc9343c824377ab4509c"} Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.906833 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.911849 4884 scope.go:117] "RemoveContainer" containerID="ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.918112 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.929350 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:14 crc kubenswrapper[4884]: E1128 15:42:14.929922 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api-log" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.929996 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api-log" Nov 28 15:42:14 crc kubenswrapper[4884]: E1128 15:42:14.930061 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.930128 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.930346 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api-log" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.930420 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" containerName="cinder-api" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.932189 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.935534 4884 scope.go:117] "RemoveContainer" containerID="61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.943644 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.943880 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.944153 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 15:42:14 crc kubenswrapper[4884]: E1128 15:42:14.949694 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4\": container with ID starting with 61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4 not found: ID does not exist" containerID="61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.950189 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4"} err="failed to get container status \"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4\": rpc error: code = NotFound desc = could not find container \"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4\": container with ID starting with 61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4 not found: ID does not exist" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.950276 4884 scope.go:117] "RemoveContainer" containerID="ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51" Nov 28 15:42:14 crc kubenswrapper[4884]: E1128 15:42:14.950928 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51\": container with ID starting with ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51 not found: ID does not exist" containerID="ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.950989 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51"} err="failed to get container status \"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51\": rpc error: code = NotFound desc = could not find container \"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51\": container with ID starting with ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51 not found: ID does not exist" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.951018 4884 scope.go:117] "RemoveContainer" containerID="61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.953489 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4"} err="failed to get container status \"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4\": rpc 
error: code = NotFound desc = could not find container \"61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4\": container with ID starting with 61181d00001ee446096409375d8d170767d61dfd4a88fd2047a71bdbe0c0d1c4 not found: ID does not exist" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.953639 4884 scope.go:117] "RemoveContainer" containerID="ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.955312 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.959656 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51"} err="failed to get container status \"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51\": rpc error: code = NotFound desc = could not find container \"ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51\": container with ID starting with ce0815a36ad34cdcf1e5e8f375975af40b5878d768de2964ae68d1d7b9c92f51 not found: ID does not exist" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.969172 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-scripts\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.970798 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.970887 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c94e18c-15c4-4ef6-929f-c1941dbd3919-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.970959 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-public-tls-certs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.971005 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data-custom\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.971038 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c94e18c-15c4-4ef6-929f-c1941dbd3919-logs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.971208 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.971438 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:14 crc kubenswrapper[4884]: I1128 15:42:14.971704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2ljx\" (UniqueName: \"kubernetes.io/projected/5c94e18c-15c4-4ef6-929f-c1941dbd3919-kube-api-access-w2ljx\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.074436 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.074628 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.074690 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2ljx\" (UniqueName: \"kubernetes.io/projected/5c94e18c-15c4-4ef6-929f-c1941dbd3919-kube-api-access-w2ljx\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.074872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-scripts\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.074939 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.074967 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c94e18c-15c4-4ef6-929f-c1941dbd3919-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.075260 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c94e18c-15c4-4ef6-929f-c1941dbd3919-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.075274 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-public-tls-certs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.075490 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data-custom\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.075571 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c94e18c-15c4-4ef6-929f-c1941dbd3919-logs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.075991 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c94e18c-15c4-4ef6-929f-c1941dbd3919-logs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.082364 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.084101 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data-custom\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.084505 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.085499 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-scripts\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.085653 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.086507 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-public-tls-certs\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.092607 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-w2ljx\" (UniqueName: \"kubernetes.io/projected/5c94e18c-15c4-4ef6-929f-c1941dbd3919-kube-api-access-w2ljx\") pod \"cinder-api-0\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.252262 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.713902 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:42:15 crc kubenswrapper[4884]: W1128 15:42:15.718057 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c94e18c_15c4_4ef6_929f_c1941dbd3919.slice/crio-83827b6cc313c6873782279a18c96b5c020a7410f947b34200c5517f0d6476fe WatchSource:0}: Error finding container 83827b6cc313c6873782279a18c96b5c020a7410f947b34200c5517f0d6476fe: Status 404 returned error can't find the container with id 83827b6cc313c6873782279a18c96b5c020a7410f947b34200c5517f0d6476fe Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.892729 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-988fdb959-xkp66"] Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.894685 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.898681 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.899638 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.899671 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.909353 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-988fdb959-xkp66"] Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.945157 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5c94e18c-15c4-4ef6-929f-c1941dbd3919","Type":"ContainerStarted","Data":"83827b6cc313c6873782279a18c96b5c020a7410f947b34200c5517f0d6476fe"} Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.992463 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-run-httpd\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.992535 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-public-tls-certs\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.992569 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-config-data\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " 
pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.992771 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8778\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-kube-api-access-l8778\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.992995 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-etc-swift\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.993075 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-log-httpd\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.993191 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-combined-ca-bundle\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:15 crc kubenswrapper[4884]: I1128 15:42:15.993215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-internal-tls-certs\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095018 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8778\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-kube-api-access-l8778\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095123 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-etc-swift\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-log-httpd\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095192 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-combined-ca-bundle\") pod \"swift-proxy-988fdb959-xkp66\" (UID: 
\"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095207 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-internal-tls-certs\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-public-tls-certs\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095270 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-run-httpd\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.095290 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-config-data\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.096181 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-run-httpd\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.096250 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-log-httpd\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.104861 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-public-tls-certs\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.104863 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-internal-tls-certs\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.105109 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-config-data\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 
15:42:16.105312 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-etc-swift\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.105486 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-combined-ca-bundle\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.112433 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8778\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-kube-api-access-l8778\") pod \"swift-proxy-988fdb959-xkp66\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") " pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.229159 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.703815 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4984d40-b18e-4392-8b77-f1155a2ae66d" path="/var/lib/kubelet/pods/f4984d40-b18e-4392-8b77-f1155a2ae66d/volumes" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.775145 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:34942->10.217.0.158:9311: read: connection reset by peer" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.775213 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:34940->10.217.0.158:9311: read: connection reset by peer" Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.895901 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-988fdb959-xkp66"] Nov 28 15:42:16 crc kubenswrapper[4884]: W1128 15:42:16.905675 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf788ec00_6116_4f7f_ac08_21623599090d.slice/crio-b555ae7c0c5977cc1d9b60f0509852e6ae44111472e28406dc152276d3889743 WatchSource:0}: Error finding container b555ae7c0c5977cc1d9b60f0509852e6ae44111472e28406dc152276d3889743: Status 404 returned error can't find the container with id b555ae7c0c5977cc1d9b60f0509852e6ae44111472e28406dc152276d3889743 Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.958360 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-988fdb959-xkp66" event={"ID":"f788ec00-6116-4f7f-ac08-21623599090d","Type":"ContainerStarted","Data":"b555ae7c0c5977cc1d9b60f0509852e6ae44111472e28406dc152276d3889743"} Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.960731 4884 generic.go:334] "Generic (PLEG): container finished" podID="56aa1c8e-ed74-42bf-8556-965f37e377b3" 
containerID="3f5da2b67edbeba186939a6c7b896f0e81ce009bf1392ad45ba9fd37e154916b" exitCode=0 Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.960774 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" event={"ID":"56aa1c8e-ed74-42bf-8556-965f37e377b3","Type":"ContainerDied","Data":"3f5da2b67edbeba186939a6c7b896f0e81ce009bf1392ad45ba9fd37e154916b"} Nov 28 15:42:16 crc kubenswrapper[4884]: I1128 15:42:16.961917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5c94e18c-15c4-4ef6-929f-c1941dbd3919","Type":"ContainerStarted","Data":"754a22031c7e79239a68f9b96363ce0d184e5432cae3b7213429811a1a369d36"} Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.946980 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.947961 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-central-agent" containerID="cri-o://4e4cd6967466ab3a0672d9d4bc02337b1a82e6fcde1fe6e097fa23def5ed621e" gracePeriod=30 Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.948059 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="sg-core" containerID="cri-o://15de6a2d92b593a2651de65bd3f3d4f6802de064d076d8a610b3a2adef599b99" gracePeriod=30 Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.948305 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="proxy-httpd" containerID="cri-o://e1dd94495ef2e1dc1412067054aa71f40a0bfaf391be7d01ad919cd3da77602e" gracePeriod=30 Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.948116 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-notification-agent" containerID="cri-o://481a33cbd27fc2f0bff62122445eb65934ae3a3b51f85ae80f7c70b42851b13e" gracePeriod=30 Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.956317 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.159:3000/\": EOF" Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.976759 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-988fdb959-xkp66" event={"ID":"f788ec00-6116-4f7f-ac08-21623599090d","Type":"ContainerStarted","Data":"c9fbb45580ab524d334d02b6207e2923abff684e828f592b260b02070af2c617"} Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.979733 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5c94e18c-15c4-4ef6-929f-c1941dbd3919","Type":"ContainerStarted","Data":"3abb3a7245fdef87ef691fa6295e4f7c18af329b641de8d6a74c5a455959669f"} Nov 28 15:42:17 crc kubenswrapper[4884]: I1128 15:42:17.980035 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.005598 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.005578339 podStartE2EDuration="4.005578339s" 
podCreationTimestamp="2025-11-28 15:42:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:17.998533749 +0000 UTC m=+1377.561317560" watchObservedRunningTime="2025-11-28 15:42:18.005578339 +0000 UTC m=+1377.568362140" Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.530360 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.576240 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.604721 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.650650 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7h82d"] Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.650928 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="dnsmasq-dns" containerID="cri-o://bbcdc8a6448f7e66d3e117bb173765f368c6f842b4073e94576bd04ba80b446f" gracePeriod=10 Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.994163 4884 generic.go:334] "Generic (PLEG): container finished" podID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerID="e1dd94495ef2e1dc1412067054aa71f40a0bfaf391be7d01ad919cd3da77602e" exitCode=0 Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.994372 4884 generic.go:334] "Generic (PLEG): container finished" podID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerID="15de6a2d92b593a2651de65bd3f3d4f6802de064d076d8a610b3a2adef599b99" exitCode=2 Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.994380 4884 generic.go:334] "Generic (PLEG): container finished" podID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerID="4e4cd6967466ab3a0672d9d4bc02337b1a82e6fcde1fe6e097fa23def5ed621e" exitCode=0 Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.994430 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerDied","Data":"e1dd94495ef2e1dc1412067054aa71f40a0bfaf391be7d01ad919cd3da77602e"} Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.994454 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerDied","Data":"15de6a2d92b593a2651de65bd3f3d4f6802de064d076d8a610b3a2adef599b99"} Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.994464 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerDied","Data":"4e4cd6967466ab3a0672d9d4bc02337b1a82e6fcde1fe6e097fa23def5ed621e"} Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.998196 4884 generic.go:334] "Generic (PLEG): container finished" podID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerID="bbcdc8a6448f7e66d3e117bb173765f368c6f842b4073e94576bd04ba80b446f" exitCode=0 Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.998232 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" 
event={"ID":"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a","Type":"ContainerDied","Data":"bbcdc8a6448f7e66d3e117bb173765f368c6f842b4073e94576bd04ba80b446f"} Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.998565 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="cinder-scheduler" containerID="cri-o://2921d745ef3c9d0e98691f6f382d073544793b7bcfe713dbff6e153a54dc8b5d" gracePeriod=30 Nov 28 15:42:18 crc kubenswrapper[4884]: I1128 15:42:18.998944 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="probe" containerID="cri-o://2fbb23b57369971df5b8063c0b8a5728034a15e9be3876a9b9b19306bfce43cb" gracePeriod=30 Nov 28 15:42:19 crc kubenswrapper[4884]: I1128 15:42:19.597969 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:42:19 crc kubenswrapper[4884]: I1128 15:42:19.598249 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-log" containerID="cri-o://3a40d4c9d40608c334700862e223be9927b9d8a8774cfac47e1dd1e2a297ca90" gracePeriod=30 Nov 28 15:42:19 crc kubenswrapper[4884]: I1128 15:42:19.598556 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-httpd" containerID="cri-o://647bdd1a93a1e22c498285d4cea6e39103c8031829ffee33e1eab7e3b045b02c" gracePeriod=30 Nov 28 15:42:19 crc kubenswrapper[4884]: I1128 15:42:19.987532 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.032841 4884 generic.go:334] "Generic (PLEG): container finished" podID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerID="3a40d4c9d40608c334700862e223be9927b9d8a8774cfac47e1dd1e2a297ca90" exitCode=143 Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.032898 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"46c5fe18-ffeb-47cf-aede-0d6a83b77d89","Type":"ContainerDied","Data":"3a40d4c9d40608c334700862e223be9927b9d8a8774cfac47e1dd1e2a297ca90"} Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.036216 4884 generic.go:334] "Generic (PLEG): container finished" podID="2902a021-ed80-45d2-b892-d91cc119f140" containerID="2fbb23b57369971df5b8063c0b8a5728034a15e9be3876a9b9b19306bfce43cb" exitCode=0 Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.036247 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2902a021-ed80-45d2-b892-d91cc119f140","Type":"ContainerDied","Data":"2fbb23b57369971df5b8063c0b8a5728034a15e9be3876a9b9b19306bfce43cb"} Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.351190 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.351492 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-log" containerID="cri-o://010b40b6e1ffcab5263272b1d4bc325f6b108116be5718e9cd626f411cd03097" 
gracePeriod=30 Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.351608 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-httpd" containerID="cri-o://12844c127d5f9413cee20953145554f58e25d37c157f4afdbc8234813085905c" gracePeriod=30 Nov 28 15:42:20 crc kubenswrapper[4884]: I1128 15:42:20.978932 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.157:5353: connect: connection refused" Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.048192 4884 generic.go:334] "Generic (PLEG): container finished" podID="9af26263-cd93-4081-bdcd-518ad0587028" containerID="010b40b6e1ffcab5263272b1d4bc325f6b108116be5718e9cd626f411cd03097" exitCode=143 Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.048285 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9af26263-cd93-4081-bdcd-518ad0587028","Type":"ContainerDied","Data":"010b40b6e1ffcab5263272b1d4bc325f6b108116be5718e9cd626f411cd03097"} Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.051515 4884 generic.go:334] "Generic (PLEG): container finished" podID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerID="481a33cbd27fc2f0bff62122445eb65934ae3a3b51f85ae80f7c70b42851b13e" exitCode=0 Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.051551 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerDied","Data":"481a33cbd27fc2f0bff62122445eb65934ae3a3b51f85ae80f7c70b42851b13e"} Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.242962 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.243057 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.243193 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.243932 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"93d6c6fb2978a8eb8e8488bd25799dd7994dbdfad1cd1ea19ac8f2b6e6f9e8d0"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:42:21 crc kubenswrapper[4884]: I1128 15:42:21.244004 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" 
containerID="cri-o://93d6c6fb2978a8eb8e8488bd25799dd7994dbdfad1cd1ea19ac8f2b6e6f9e8d0" gracePeriod=600 Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.068727 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="93d6c6fb2978a8eb8e8488bd25799dd7994dbdfad1cd1ea19ac8f2b6e6f9e8d0" exitCode=0 Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.068827 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"93d6c6fb2978a8eb8e8488bd25799dd7994dbdfad1cd1ea19ac8f2b6e6f9e8d0"} Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.069000 4884 scope.go:117] "RemoveContainer" containerID="c832312ab3e30450a9cf06ba49e8c224ee46755e5134c05566d8c182c4c44cfc" Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.148687 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.205016 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-657c567946-l5n4g"] Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.205264 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-657c567946-l5n4g" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-api" containerID="cri-o://f9802f280f0524d6a7af18d87d664c767e19efd621585320045209b91d143e3d" gracePeriod=30 Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.205391 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-657c567946-l5n4g" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-httpd" containerID="cri-o://1b21629c9f6a6e80ef6dd9b05cf79286957a70256a474418362e2ecb678ee394" gracePeriod=30 Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.883539 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.148:9292/healthcheck\": read tcp 10.217.0.2:59136->10.217.0.148:9292: read: connection reset by peer" Nov 28 15:42:22 crc kubenswrapper[4884]: I1128 15:42:22.883866 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.148:9292/healthcheck\": read tcp 10.217.0.2:59138->10.217.0.148:9292: read: connection reset by peer" Nov 28 15:42:23 crc kubenswrapper[4884]: I1128 15:42:23.080594 4884 generic.go:334] "Generic (PLEG): container finished" podID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerID="1b21629c9f6a6e80ef6dd9b05cf79286957a70256a474418362e2ecb678ee394" exitCode=0 Nov 28 15:42:23 crc kubenswrapper[4884]: I1128 15:42:23.080639 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-657c567946-l5n4g" event={"ID":"300c7172-b015-47a7-b3cd-52b5e67fdf64","Type":"ContainerDied","Data":"1b21629c9f6a6e80ef6dd9b05cf79286957a70256a474418362e2ecb678ee394"} Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.111669 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" 
event={"ID":"56aa1c8e-ed74-42bf-8556-965f37e377b3","Type":"ContainerDied","Data":"5e4c04f2ffef7385cba95f8e766321e4a84a98d4a1f1b41106e3b8ac536bdf25"} Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.112053 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e4c04f2ffef7385cba95f8e766321e4a84a98d4a1f1b41106e3b8ac536bdf25" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.129427 4884 generic.go:334] "Generic (PLEG): container finished" podID="2902a021-ed80-45d2-b892-d91cc119f140" containerID="2921d745ef3c9d0e98691f6f382d073544793b7bcfe713dbff6e153a54dc8b5d" exitCode=0 Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.129519 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2902a021-ed80-45d2-b892-d91cc119f140","Type":"ContainerDied","Data":"2921d745ef3c9d0e98691f6f382d073544793b7bcfe713dbff6e153a54dc8b5d"} Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.151024 4884 generic.go:334] "Generic (PLEG): container finished" podID="9af26263-cd93-4081-bdcd-518ad0587028" containerID="12844c127d5f9413cee20953145554f58e25d37c157f4afdbc8234813085905c" exitCode=0 Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.151127 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9af26263-cd93-4081-bdcd-518ad0587028","Type":"ContainerDied","Data":"12844c127d5f9413cee20953145554f58e25d37c157f4afdbc8234813085905c"} Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.169235 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.169483 4884 generic.go:334] "Generic (PLEG): container finished" podID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerID="647bdd1a93a1e22c498285d4cea6e39103c8031829ffee33e1eab7e3b045b02c" exitCode=0 Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.169521 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"46c5fe18-ffeb-47cf-aede-0d6a83b77d89","Type":"ContainerDied","Data":"647bdd1a93a1e22c498285d4cea6e39103c8031829ffee33e1eab7e3b045b02c"} Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.268161 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data\") pod \"56aa1c8e-ed74-42bf-8556-965f37e377b3\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.268689 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp6dg\" (UniqueName: \"kubernetes.io/projected/56aa1c8e-ed74-42bf-8556-965f37e377b3-kube-api-access-wp6dg\") pod \"56aa1c8e-ed74-42bf-8556-965f37e377b3\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.268767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56aa1c8e-ed74-42bf-8556-965f37e377b3-logs\") pod \"56aa1c8e-ed74-42bf-8556-965f37e377b3\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.268800 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-combined-ca-bundle\") pod \"56aa1c8e-ed74-42bf-8556-965f37e377b3\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.268835 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data-custom\") pod \"56aa1c8e-ed74-42bf-8556-965f37e377b3\" (UID: \"56aa1c8e-ed74-42bf-8556-965f37e377b3\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.269385 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56aa1c8e-ed74-42bf-8556-965f37e377b3-logs" (OuterVolumeSpecName: "logs") pod "56aa1c8e-ed74-42bf-8556-965f37e377b3" (UID: "56aa1c8e-ed74-42bf-8556-965f37e377b3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.281708 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56aa1c8e-ed74-42bf-8556-965f37e377b3-kube-api-access-wp6dg" (OuterVolumeSpecName: "kube-api-access-wp6dg") pod "56aa1c8e-ed74-42bf-8556-965f37e377b3" (UID: "56aa1c8e-ed74-42bf-8556-965f37e377b3"). InnerVolumeSpecName "kube-api-access-wp6dg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.294301 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "56aa1c8e-ed74-42bf-8556-965f37e377b3" (UID: "56aa1c8e-ed74-42bf-8556-965f37e377b3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.346679 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.369922 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czvzn\" (UniqueName: \"kubernetes.io/projected/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-kube-api-access-czvzn\") pod \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370013 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-nb\") pod \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370043 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-svc\") pod \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370068 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-swift-storage-0\") pod \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370104 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-sb\") pod \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370166 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-config\") pod \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\" (UID: \"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370619 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370636 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp6dg\" (UniqueName: \"kubernetes.io/projected/56aa1c8e-ed74-42bf-8556-965f37e377b3-kube-api-access-wp6dg\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.370646 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/56aa1c8e-ed74-42bf-8556-965f37e377b3-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.380297 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-kube-api-access-czvzn" (OuterVolumeSpecName: "kube-api-access-czvzn") pod "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" (UID: "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a"). InnerVolumeSpecName "kube-api-access-czvzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.472620 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czvzn\" (UniqueName: \"kubernetes.io/projected/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-kube-api-access-czvzn\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.576293 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.579007 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56aa1c8e-ed74-42bf-8556-965f37e377b3" (UID: "56aa1c8e-ed74-42bf-8556-965f37e377b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.631871 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data" (OuterVolumeSpecName: "config-data") pod "56aa1c8e-ed74-42bf-8556-965f37e377b3" (UID: "56aa1c8e-ed74-42bf-8556-965f37e377b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.676783 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjhjt\" (UniqueName: \"kubernetes.io/projected/7fd62bec-641b-49b7-b81b-9f6e127d36c5-kube-api-access-fjhjt\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.679663 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-scripts\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.679807 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-combined-ca-bundle\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.679907 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-sg-core-conf-yaml\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.680055 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-run-httpd\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.680295 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-log-httpd\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 
15:42:24.680569 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-config-data\") pod \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\" (UID: \"7fd62bec-641b-49b7-b81b-9f6e127d36c5\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.681363 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.681386 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56aa1c8e-ed74-42bf-8556-965f37e377b3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.681419 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.681954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.702205 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fd62bec-641b-49b7-b81b-9f6e127d36c5-kube-api-access-fjhjt" (OuterVolumeSpecName: "kube-api-access-fjhjt") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "kube-api-access-fjhjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.716724 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-scripts" (OuterVolumeSpecName: "scripts") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.754773 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.763890 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-config" (OuterVolumeSpecName: "config") pod "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" (UID: "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782466 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-combined-ca-bundle\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782577 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-httpd-run\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782608 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782690 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-scripts\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782715 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6mrb\" (UniqueName: \"kubernetes.io/projected/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-kube-api-access-r6mrb\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782741 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-public-tls-certs\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.782766 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-logs\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.787684 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-config-data\") pod \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\" (UID: \"46c5fe18-ffeb-47cf-aede-0d6a83b77d89\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.797500 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.797790 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-logs" (OuterVolumeSpecName: "logs") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808019 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808049 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808062 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7fd62bec-641b-49b7-b81b-9f6e127d36c5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808074 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808100 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjhjt\" (UniqueName: \"kubernetes.io/projected/7fd62bec-641b-49b7-b81b-9f6e127d36c5-kube-api-access-fjhjt\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808110 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.808118 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.815225 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.829243 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-scripts" (OuterVolumeSpecName: "scripts") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.829371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-kube-api-access-r6mrb" (OuterVolumeSpecName: "kube-api-access-r6mrb") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "kube-api-access-r6mrb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.829675 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.832461 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911674 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvdmc\" (UniqueName: \"kubernetes.io/projected/9af26263-cd93-4081-bdcd-518ad0587028-kube-api-access-fvdmc\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911712 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-scripts\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911735 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-scripts\") pod \"2902a021-ed80-45d2-b892-d91cc119f140\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911784 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911835 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbfdb\" (UniqueName: \"kubernetes.io/projected/2902a021-ed80-45d2-b892-d91cc119f140-kube-api-access-gbfdb\") pod \"2902a021-ed80-45d2-b892-d91cc119f140\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-internal-tls-certs\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911914 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-httpd-run\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911952 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-logs\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.911978 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2902a021-ed80-45d2-b892-d91cc119f140-etc-machine-id\") pod \"2902a021-ed80-45d2-b892-d91cc119f140\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " Nov 28 
15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.912065 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data-custom\") pod \"2902a021-ed80-45d2-b892-d91cc119f140\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.912098 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-combined-ca-bundle\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.912121 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-combined-ca-bundle\") pod \"2902a021-ed80-45d2-b892-d91cc119f140\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.912135 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-config-data\") pod \"9af26263-cd93-4081-bdcd-518ad0587028\" (UID: \"9af26263-cd93-4081-bdcd-518ad0587028\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.912156 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data\") pod \"2902a021-ed80-45d2-b892-d91cc119f140\" (UID: \"2902a021-ed80-45d2-b892-d91cc119f140\") " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.913623 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.917659 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.917696 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.917709 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6mrb\" (UniqueName: \"kubernetes.io/projected/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-kube-api-access-r6mrb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.917723 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.931255 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9af26263-cd93-4081-bdcd-518ad0587028-kube-api-access-fvdmc" (OuterVolumeSpecName: "kube-api-access-fvdmc") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "kube-api-access-fvdmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.933471 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-logs" (OuterVolumeSpecName: "logs") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.933940 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-scripts" (OuterVolumeSpecName: "scripts") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.933977 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2902a021-ed80-45d2-b892-d91cc119f140-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2902a021-ed80-45d2-b892-d91cc119f140" (UID: "2902a021-ed80-45d2-b892-d91cc119f140"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.935666 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.960217 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2902a021-ed80-45d2-b892-d91cc119f140-kube-api-access-gbfdb" (OuterVolumeSpecName: "kube-api-access-gbfdb") pod "2902a021-ed80-45d2-b892-d91cc119f140" (UID: "2902a021-ed80-45d2-b892-d91cc119f140"). InnerVolumeSpecName "kube-api-access-gbfdb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.960650 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-scripts" (OuterVolumeSpecName: "scripts") pod "2902a021-ed80-45d2-b892-d91cc119f140" (UID: "2902a021-ed80-45d2-b892-d91cc119f140"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:24 crc kubenswrapper[4884]: I1128 15:42:24.960688 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2902a021-ed80-45d2-b892-d91cc119f140" (UID: "2902a021-ed80-45d2-b892-d91cc119f140"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.010189 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021396 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021456 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021468 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbfdb\" (UniqueName: \"kubernetes.io/projected/2902a021-ed80-45d2-b892-d91cc119f140-kube-api-access-gbfdb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021477 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af26263-cd93-4081-bdcd-518ad0587028-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021487 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2902a021-ed80-45d2-b892-d91cc119f140-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021495 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021503 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021513 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvdmc\" (UniqueName: \"kubernetes.io/projected/9af26263-cd93-4081-bdcd-518ad0587028-kube-api-access-fvdmc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.021520 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.024216 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" (UID: "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.026064 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.033212 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.046836 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" (UID: "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.047613 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" (UID: "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.061464 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.115894 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" (UID: "408db6ff-1c9d-4cc1-beba-096fa0ebbd2a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.126842 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127004 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127035 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127044 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127054 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127063 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127072 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127080 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.127103 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.179005 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.182579 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-7h82d" event={"ID":"408db6ff-1c9d-4cc1-beba-096fa0ebbd2a","Type":"ContainerDied","Data":"f91d7fb5c8f52d0f11988f85a6b0df1fb303512e02341fee3a4e92734fc7faa6"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.182630 4884 scope.go:117] "RemoveContainer" containerID="bbcdc8a6448f7e66d3e117bb173765f368c6f842b4073e94576bd04ba80b446f" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.185713 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9af26263-cd93-4081-bdcd-518ad0587028","Type":"ContainerDied","Data":"48513c64d92de00b4cd4b607c74300116e6b1f63a4ebea01bef53dc6acbc36c1"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.185799 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.191633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-988fdb959-xkp66" event={"ID":"f788ec00-6116-4f7f-ac08-21623599090d","Type":"ContainerStarted","Data":"ae366ef4151eca779ecbf163f7ffdccba3b64e846702b94d2bd3434c05e40d34"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.192366 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.192382 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.197370 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2902a021-ed80-45d2-b892-d91cc119f140","Type":"ContainerDied","Data":"333b9b4d0ea69920066843298000988b818398f1778ba9f7a7af56fff0b74d83"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.197459 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.212862 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.214246 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.224139 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-988fdb959-xkp66" podStartSLOduration=10.224116008 podStartE2EDuration="10.224116008s" podCreationTimestamp="2025-11-28 15:42:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:25.216642378 +0000 UTC m=+1384.779426189" watchObservedRunningTime="2025-11-28 15:42:25.224116008 +0000 UTC m=+1384.786899819" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.226942 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3","Type":"ContainerStarted","Data":"c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.230667 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.230985 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"46c5fe18-ffeb-47cf-aede-0d6a83b77d89","Type":"ContainerDied","Data":"9b42b4900ae595a7d91289c6a6be4e53ab0e5313a1a188443876109297561868"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.234464 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.234750 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.234756 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7fd62bec-641b-49b7-b81b-9f6e127d36c5","Type":"ContainerDied","Data":"28ad68ceff5f0f967731e7bf8cc9040edd0e4ee6d744fecf560bed0140bd90bf"} Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.235458 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.242312 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2902a021-ed80-45d2-b892-d91cc119f140" (UID: "2902a021-ed80-45d2-b892-d91cc119f140"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.242364 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-988fdb959-xkp66" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.244375 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-988fdb959-xkp66" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.276221 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-config-data" (OuterVolumeSpecName: "config-data") pod "46c5fe18-ffeb-47cf-aede-0d6a83b77d89" (UID: "46c5fe18-ffeb-47cf-aede-0d6a83b77d89"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.299598 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-config-data" (OuterVolumeSpecName: "config-data") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.314331 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9af26263-cd93-4081-bdcd-518ad0587028" (UID: "9af26263-cd93-4081-bdcd-518ad0587028"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.316001 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.836600338 podStartE2EDuration="16.315978715s" podCreationTimestamp="2025-11-28 15:42:09 +0000 UTC" firstStartedPulling="2025-11-28 15:42:10.752486715 +0000 UTC m=+1370.315270506" lastFinishedPulling="2025-11-28 15:42:24.231865082 +0000 UTC m=+1383.794648883" observedRunningTime="2025-11-28 15:42:25.28842112 +0000 UTC m=+1384.851204921" watchObservedRunningTime="2025-11-28 15:42:25.315978715 +0000 UTC m=+1384.878762516" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.323693 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.331304 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data" (OuterVolumeSpecName: "config-data") pod "2902a021-ed80-45d2-b892-d91cc119f140" (UID: "2902a021-ed80-45d2-b892-d91cc119f140"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.336679 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.336708 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.336719 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46c5fe18-ffeb-47cf-aede-0d6a83b77d89-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.336728 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.336736 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af26263-cd93-4081-bdcd-518ad0587028-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.336744 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2902a021-ed80-45d2-b892-d91cc119f140-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.360344 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-config-data" (OuterVolumeSpecName: "config-data") pod "7fd62bec-641b-49b7-b81b-9f6e127d36c5" (UID: "7fd62bec-641b-49b7-b81b-9f6e127d36c5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.376050 4884 scope.go:117] "RemoveContainer" containerID="3dc1b748efa35e876aa39c3b83376d72dddabbff27a394ab1563c9ec9fc73d4a" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.398158 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7h82d"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.409946 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-7h82d"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.420260 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7cc8c9b9fd-zqcg4"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.426063 4884 scope.go:117] "RemoveContainer" containerID="12844c127d5f9413cee20953145554f58e25d37c157f4afdbc8234813085905c" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.453018 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7fd62bec-641b-49b7-b81b-9f6e127d36c5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.463849 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7cc8c9b9fd-zqcg4"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.509137 4884 scope.go:117] "RemoveContainer" containerID="010b40b6e1ffcab5263272b1d4bc325f6b108116be5718e9cd626f411cd03097" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.550524 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.553945 4884 scope.go:117] "RemoveContainer" containerID="2fbb23b57369971df5b8063c0b8a5728034a15e9be3876a9b9b19306bfce43cb" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.567581 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.591442 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608193 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608624 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-notification-agent" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608643 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-notification-agent" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608659 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="sg-core" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608665 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="sg-core" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608679 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="dnsmasq-dns" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608685 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="dnsmasq-dns" Nov 28 15:42:25 crc 
kubenswrapper[4884]: E1128 15:42:25.608701 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-log" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608706 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-log" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608714 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api-log" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608719 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api-log" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608729 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="proxy-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608735 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="proxy-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608749 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-central-agent" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608757 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-central-agent" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608766 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608772 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608781 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608786 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608798 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608804 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608815 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-log" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608820 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-log" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608829 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="probe" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608836 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="probe" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 
15:42:25.608846 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="cinder-scheduler" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608852 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="cinder-scheduler" Nov 28 15:42:25 crc kubenswrapper[4884]: E1128 15:42:25.608861 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="init" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.608866 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="init" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609035 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="sg-core" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609052 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="probe" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609062 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2902a021-ed80-45d2-b892-d91cc119f140" containerName="cinder-scheduler" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609074 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-notification-agent" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609117 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609130 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="ceilometer-central-agent" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609143 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-log" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609150 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609159 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api-log" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609169 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-log" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609181 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" containerName="glance-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609191 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" containerName="proxy-httpd" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.609199 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" containerName="dnsmasq-dns" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.610203 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.613815 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.614034 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-qmtm9"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.624109 4884 scope.go:117] "RemoveContainer" containerID="2921d745ef3c9d0e98691f6f382d073544793b7bcfe713dbff6e153a54dc8b5d"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.624273 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.634759 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.636341 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.643773 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.657099 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.658521 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.664534 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.669014 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.672398 4884 scope.go:117] "RemoveContainer" containerID="647bdd1a93a1e22c498285d4cea6e39103c8031829ffee33e1eab7e3b045b02c"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.682357 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.698034 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.709518 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.728122 4884 scope.go:117] "RemoveContainer" containerID="3a40d4c9d40608c334700862e223be9927b9d8a8774cfac47e1dd1e2a297ca90"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.730621 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.747626 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.750885 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.755746 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758708 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758744 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758776 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758805 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758830 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758846 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j26jz\" (UniqueName: \"kubernetes.io/projected/5d1f5739-e8a4-4081-8104-57dfc250861a-kube-api-access-j26jz\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758920 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/769485f5-63dc-4d17-9bfb-3006d99e2616-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758940 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-scripts\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758956 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.758987 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbw97\" (UniqueName: \"kubernetes.io/projected/769485f5-63dc-4d17-9bfb-3006d99e2616-kube-api-access-cbw97\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.759007 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.759026 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.759044 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.759059 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.760880 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.761270 4884 scope.go:117] "RemoveContainer" containerID="e1dd94495ef2e1dc1412067054aa71f40a0bfaf391be7d01ad919cd3da77602e"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.767701 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.769745 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.774150 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.774204 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.783548 4884 scope.go:117] "RemoveContainer" containerID="15de6a2d92b593a2651de65bd3f3d4f6802de064d076d8a610b3a2adef599b99"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.786749 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.814602 4884 scope.go:117] "RemoveContainer" containerID="481a33cbd27fc2f0bff62122445eb65934ae3a3b51f85ae80f7c70b42851b13e"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.820154 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.853833 4884 scope.go:117] "RemoveContainer" containerID="4e4cd6967466ab3a0672d9d4bc02337b1a82e6fcde1fe6e097fa23def5ed621e"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871315 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-scripts\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871372 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871401 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871449 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871475 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871497 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j26jz\" (UniqueName: \"kubernetes.io/projected/5d1f5739-e8a4-4081-8104-57dfc250861a-kube-api-access-j26jz\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871523 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6949\" (UniqueName: \"kubernetes.io/projected/c0e2713c-5c3d-457f-b444-122039f003d3-kube-api-access-s6949\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871566 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-run-httpd\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871595 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-scripts\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/769485f5-63dc-4d17-9bfb-3006d99e2616-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871653 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-config-data\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871679 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-scripts\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871721 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-log-httpd\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871743 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871767 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871796 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871823 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbw97\" (UniqueName: \"kubernetes.io/projected/769485f5-63dc-4d17-9bfb-3006d99e2616-kube-api-access-cbw97\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871851 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871895 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-logs\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871932 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871947 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871963 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871979 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-config-data\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.871996 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtlrb\" (UniqueName: \"kubernetes.io/projected/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-kube-api-access-jtlrb\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.872012 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.872028 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.872481 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-logs\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.875377 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.875569 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.879410 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/769485f5-63dc-4d17-9bfb-3006d99e2616-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.885798 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0"
\"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.885849 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.886123 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.891772 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.900834 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.901573 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.901619 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.906543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-scripts\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.920872 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbw97\" (UniqueName: \"kubernetes.io/projected/769485f5-63dc-4d17-9bfb-3006d99e2616-kube-api-access-cbw97\") pod \"cinder-scheduler-0\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " pod="openstack/cinder-scheduler-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.926887 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 
15:42:25.927824 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j26jz\" (UniqueName: \"kubernetes.io/projected/5d1f5739-e8a4-4081-8104-57dfc250861a-kube-api-access-j26jz\") pod \"glance-default-internal-api-0\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.948589 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.977449 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.978998 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6949\" (UniqueName: \"kubernetes.io/projected/c0e2713c-5c3d-457f-b444-122039f003d3-kube-api-access-s6949\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979053 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-run-httpd\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979077 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-scripts\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-config-data\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979151 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-log-httpd\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979168 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979183 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979208 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979236 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-logs\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979270 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979290 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-config-data\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979311 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtlrb\" (UniqueName: \"kubernetes.io/projected/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-kube-api-access-jtlrb\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979344 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-scripts\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979364 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.979642 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.982794 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-logs\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.983032 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.983819 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-log-httpd\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.987227 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7cc8c9b9fd-zqcg4" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": dial tcp 10.217.0.158:9311: i/o timeout (Client.Timeout exceeded while awaiting headers)" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.989856 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-run-httpd\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0" Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.993583 4884 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:42:25 crc kubenswrapper[4884]: I1128 15:42:25.996622 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.003347 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-scripts\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.003595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.003967 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-config-data\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.004478 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-scripts\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.006695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.007313 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.021201 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-config-data\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.021364 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6949\" (UniqueName: \"kubernetes.io/projected/c0e2713c-5c3d-457f-b444-122039f003d3-kube-api-access-s6949\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.023664 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtlrb\" (UniqueName: \"kubernetes.io/projected/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-kube-api-access-jtlrb\") pod \"ceilometer-0\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " pod="openstack/ceilometer-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.043947 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.069497 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.085305 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.270710 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-988fdb959-xkp66"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.638729 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.704001 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2902a021-ed80-45d2-b892-d91cc119f140" path="/var/lib/kubelet/pods/2902a021-ed80-45d2-b892-d91cc119f140/volumes"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.704948 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="408db6ff-1c9d-4cc1-beba-096fa0ebbd2a" path="/var/lib/kubelet/pods/408db6ff-1c9d-4cc1-beba-096fa0ebbd2a/volumes"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.705803 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46c5fe18-ffeb-47cf-aede-0d6a83b77d89" path="/var/lib/kubelet/pods/46c5fe18-ffeb-47cf-aede-0d6a83b77d89/volumes"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.707713 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56aa1c8e-ed74-42bf-8556-965f37e377b3" path="/var/lib/kubelet/pods/56aa1c8e-ed74-42bf-8556-965f37e377b3/volumes"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.708489 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fd62bec-641b-49b7-b81b-9f6e127d36c5" path="/var/lib/kubelet/pods/7fd62bec-641b-49b7-b81b-9f6e127d36c5/volumes"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.711487 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9af26263-cd93-4081-bdcd-518ad0587028" path="/var/lib/kubelet/pods/9af26263-cd93-4081-bdcd-518ad0587028/volumes"
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.724923 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.731916 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:42:26 crc kubenswrapper[4884]: I1128 15:42:26.942762 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:42:26 crc kubenswrapper[4884]: W1128 15:42:26.953817 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0e2713c_5c3d_457f_b444_122039f003d3.slice/crio-4b7fde603181a3eb51c9d691cdd1803e3e36e47baa1ea23dfd5d221381a3396a WatchSource:0}: Error finding container 4b7fde603181a3eb51c9d691cdd1803e3e36e47baa1ea23dfd5d221381a3396a: Status 404 returned error can't find the container with id 4b7fde603181a3eb51c9d691cdd1803e3e36e47baa1ea23dfd5d221381a3396a
Nov 28 15:42:27 crc kubenswrapper[4884]: I1128 15:42:27.075658 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:42:27 crc kubenswrapper[4884]: I1128 15:42:27.303825 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c0e2713c-5c3d-457f-b444-122039f003d3","Type":"ContainerStarted","Data":"4b7fde603181a3eb51c9d691cdd1803e3e36e47baa1ea23dfd5d221381a3396a"}
Nov 28 15:42:27 crc kubenswrapper[4884]: I1128 15:42:27.306036 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerStarted","Data":"9005ffbb5a438b9c876bfee43bc71cc39f6744d2a83e6b7d8ee5891e4a8452b3"}
Nov 28 15:42:27 crc kubenswrapper[4884]: I1128 15:42:27.309083 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"769485f5-63dc-4d17-9bfb-3006d99e2616","Type":"ContainerStarted","Data":"728671745f46c957923baa92164b93a32b0f269345d3d8b0d48a5ec05ba50e20"}
Nov 28 15:42:27 crc kubenswrapper[4884]: I1128 15:42:27.310234 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5d1f5739-e8a4-4081-8104-57dfc250861a","Type":"ContainerStarted","Data":"a3d365104e57088338695b8adb26a68005e2a0e7efc884213881152a54b62ee1"}
Nov 28 15:42:27 crc kubenswrapper[4884]: I1128 15:42:27.783624 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 15:42:28 crc kubenswrapper[4884]: I1128 15:42:28.337309 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"769485f5-63dc-4d17-9bfb-3006d99e2616","Type":"ContainerStarted","Data":"46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d"}
Nov 28 15:42:28 crc kubenswrapper[4884]: I1128 15:42:28.372259 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5d1f5739-e8a4-4081-8104-57dfc250861a","Type":"ContainerStarted","Data":"ce2d368f9ec98e7e160e340cbf52ff57688a8c528c07d144e558a64b3ca00a72"}
Nov 28 15:42:28 crc kubenswrapper[4884]: I1128 15:42:28.382584 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c0e2713c-5c3d-457f-b444-122039f003d3","Type":"ContainerStarted","Data":"deec2653ca05ea2b2431e7f8f5e25dfcc18dcdf5ee831af2554036dbfce9676e"}
Nov 28 15:42:28 crc kubenswrapper[4884]: I1128 15:42:28.388967 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerStarted","Data":"0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545"}
Nov 28 15:42:29 crc kubenswrapper[4884]: I1128 15:42:29.397283 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c0e2713c-5c3d-457f-b444-122039f003d3","Type":"ContainerStarted","Data":"1dba7e621dc762e39efb79478b3479dc3c3f7b54537bead34f7ee2f18daf1ebc"}
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.408369 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerStarted","Data":"eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce"}
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.410253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"769485f5-63dc-4d17-9bfb-3006d99e2616","Type":"ContainerStarted","Data":"3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd"}
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.412316 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5d1f5739-e8a4-4081-8104-57dfc250861a","Type":"ContainerStarted","Data":"ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d"}
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.433587 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.433568081 podStartE2EDuration="5.433568081s" podCreationTimestamp="2025-11-28 15:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:30.433339916 +0000 UTC m=+1389.996123757" watchObservedRunningTime="2025-11-28 15:42:30.433568081 +0000 UTC m=+1389.996351892"
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.454731 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.454714082 podStartE2EDuration="5.454714082s" podCreationTimestamp="2025-11-28 15:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:30.452449507 +0000 UTC m=+1390.015233318" watchObservedRunningTime="2025-11-28 15:42:30.454714082 +0000 UTC m=+1390.017497883"
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.475454 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.475438442 podStartE2EDuration="5.475438442s" podCreationTimestamp="2025-11-28 15:42:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:42:30.472196984 +0000 UTC m=+1390.034980795" watchObservedRunningTime="2025-11-28 15:42:30.475438442 +0000 UTC m=+1390.038222243"
Nov 28 15:42:30 crc kubenswrapper[4884]: I1128 15:42:30.994738 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 15:42:31 crc kubenswrapper[4884]: I1128 15:42:31.241621 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-988fdb959-xkp66"
Nov 28 15:42:31 crc kubenswrapper[4884]: I1128 15:42:31.448849 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerStarted","Data":"ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0"}
Nov 28 15:42:32 crc kubenswrapper[4884]: I1128 15:42:32.458606 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerStarted","Data":"269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864"}
Nov 28 15:42:32 crc kubenswrapper[4884]: I1128 15:42:32.458809 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-central-agent" containerID="cri-o://0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545" gracePeriod=30
grace period" pod="openstack/ceilometer-0" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-central-agent" containerID="cri-o://0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545" gracePeriod=30 Nov 28 15:42:32 crc kubenswrapper[4884]: I1128 15:42:32.459184 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="proxy-httpd" containerID="cri-o://269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864" gracePeriod=30 Nov 28 15:42:32 crc kubenswrapper[4884]: I1128 15:42:32.459262 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-notification-agent" containerID="cri-o://eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce" gracePeriod=30 Nov 28 15:42:32 crc kubenswrapper[4884]: I1128 15:42:32.459267 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="sg-core" containerID="cri-o://ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0" gracePeriod=30 Nov 28 15:42:32 crc kubenswrapper[4884]: I1128 15:42:32.485877 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.388346231 podStartE2EDuration="7.485860201s" podCreationTimestamp="2025-11-28 15:42:25 +0000 UTC" firstStartedPulling="2025-11-28 15:42:26.738272141 +0000 UTC m=+1386.301055942" lastFinishedPulling="2025-11-28 15:42:31.835786111 +0000 UTC m=+1391.398569912" observedRunningTime="2025-11-28 15:42:32.478619406 +0000 UTC m=+1392.041403227" watchObservedRunningTime="2025-11-28 15:42:32.485860201 +0000 UTC m=+1392.048644002" Nov 28 15:42:33 crc kubenswrapper[4884]: I1128 15:42:33.470396 4884 generic.go:334] "Generic (PLEG): container finished" podID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerID="269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864" exitCode=0 Nov 28 15:42:33 crc kubenswrapper[4884]: I1128 15:42:33.470424 4884 generic.go:334] "Generic (PLEG): container finished" podID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerID="ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0" exitCode=2 Nov 28 15:42:33 crc kubenswrapper[4884]: I1128 15:42:33.470432 4884 generic.go:334] "Generic (PLEG): container finished" podID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerID="eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce" exitCode=0 Nov 28 15:42:33 crc kubenswrapper[4884]: I1128 15:42:33.470449 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerDied","Data":"269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864"} Nov 28 15:42:33 crc kubenswrapper[4884]: I1128 15:42:33.470474 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerDied","Data":"ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0"} Nov 28 15:42:33 crc kubenswrapper[4884]: I1128 15:42:33.470484 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerDied","Data":"eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce"} Nov 28 
15:42:35 crc kubenswrapper[4884]: I1128 15:42:35.949660 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:35 crc kubenswrapper[4884]: I1128 15:42:35.950686 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.003332 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.016544 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.087457 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.087499 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.093400 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.140621 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.153566 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.166655 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-config-data\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.166715 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-run-httpd\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.166740 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-log-httpd\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.166842 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-combined-ca-bundle\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.166898 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-scripts\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.166967 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtlrb\" (UniqueName: 
\"kubernetes.io/projected/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-kube-api-access-jtlrb\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.167110 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-sg-core-conf-yaml\") pod \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\" (UID: \"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821\") " Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.167305 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.167430 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.167652 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.167667 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.190239 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-kube-api-access-jtlrb" (OuterVolumeSpecName: "kube-api-access-jtlrb") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "kube-api-access-jtlrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.195197 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-scripts" (OuterVolumeSpecName: "scripts") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.211772 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.265845 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.269722 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.269749 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.269763 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtlrb\" (UniqueName: \"kubernetes.io/projected/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-kube-api-access-jtlrb\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.269777 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.290286 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.296952 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-config-data" (OuterVolumeSpecName: "config-data") pod "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" (UID: "1ee4be18-5ef9-4c05-a6c6-97fcc93cf821"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.372954 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.503379 4884 generic.go:334] "Generic (PLEG): container finished" podID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerID="0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545" exitCode=0 Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.504646 4884 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508162 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerDied","Data":"0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545"}
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508203 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1ee4be18-5ef9-4c05-a6c6-97fcc93cf821","Type":"ContainerDied","Data":"9005ffbb5a438b9c876bfee43bc71cc39f6744d2a83e6b7d8ee5891e4a8452b3"}
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508218 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508234 4884 scope.go:117] "RemoveContainer" containerID="269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508851 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508919 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.508931 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.541833 4884 scope.go:117] "RemoveContainer" containerID="ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.545184 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.561281 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569373 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.569731 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="proxy-httpd"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569749 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="proxy-httpd"
Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.569771 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="sg-core"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569777 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="sg-core"
Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.569790 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-central-agent"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569797 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-central-agent"
Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.569808 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-notification-agent"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569813 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-notification-agent"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569968 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="proxy-httpd"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.569988 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-central-agent"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.570002 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="ceilometer-notification-agent"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.570019 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" containerName="sg-core"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.572191 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.584660 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.594352 4884 scope.go:117] "RemoveContainer" containerID="eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.595053 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.653369 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.684542 4884 scope.go:117] "RemoveContainer" containerID="0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.707638 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ee4be18-5ef9-4c05-a6c6-97fcc93cf821" path="/var/lib/kubelet/pods/1ee4be18-5ef9-4c05-a6c6-97fcc93cf821/volumes"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709322 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-config-data\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709484 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709526 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-run-httpd\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709558 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709662 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-log-httpd\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709681 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-scripts\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.709749 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wscj7\" (UniqueName: \"kubernetes.io/projected/c656f30f-d239-443e-95bf-1bab91ef52c5-kube-api-access-wscj7\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.716397 4884 scope.go:117] "RemoveContainer" containerID="269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864" Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.717463 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864\": container with ID starting with 269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864 not found: ID does not exist" containerID="269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.717519 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864"} err="failed to get container status \"269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864\": rpc error: code = NotFound desc = could not find container \"269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864\": container with ID starting with 269f4f602d08a5ef6b1ed5fe489997c5e5b1b82a0a1e09128efd922b10b44864 not found: ID does not exist" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.717541 4884 scope.go:117] "RemoveContainer" containerID="ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0" Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.721356 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0\": container with ID starting with ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0 not found: ID does not exist" containerID="ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.721418 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0"} err="failed to get container status 
\"ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0\": rpc error: code = NotFound desc = could not find container \"ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0\": container with ID starting with ff223e04e12f8b80345f6b2a1891e683fc46793fe67c78c7d9235c4160684ac0 not found: ID does not exist" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.721443 4884 scope.go:117] "RemoveContainer" containerID="eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce" Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.722924 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce\": container with ID starting with eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce not found: ID does not exist" containerID="eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.722959 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce"} err="failed to get container status \"eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce\": rpc error: code = NotFound desc = could not find container \"eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce\": container with ID starting with eea6b6327468d71c65d3525a67194f066f6bff1ef32c3cdc8f1e4a2add72abce not found: ID does not exist" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.722977 4884 scope.go:117] "RemoveContainer" containerID="0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545" Nov 28 15:42:36 crc kubenswrapper[4884]: E1128 15:42:36.727319 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545\": container with ID starting with 0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545 not found: ID does not exist" containerID="0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.727398 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545"} err="failed to get container status \"0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545\": rpc error: code = NotFound desc = could not find container \"0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545\": container with ID starting with 0d5c6f13c83528e26a469b64c796d972ddebb0673d12c04dfecba0a4236da545 not found: ID does not exist" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.811795 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wscj7\" (UniqueName: \"kubernetes.io/projected/c656f30f-d239-443e-95bf-1bab91ef52c5-kube-api-access-wscj7\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0" Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.811903 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-config-data\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0" Nov 28 15:42:36 crc 
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.812006 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-run-httpd\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.812042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.812239 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-log-httpd\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.812267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-scripts\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.813559 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-run-httpd\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.814321 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-log-httpd\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.816686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.817390 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-scripts\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.820740 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-config-data\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.828406 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.829898 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wscj7\" (UniqueName: \"kubernetes.io/projected/c656f30f-d239-443e-95bf-1bab91ef52c5-kube-api-access-wscj7\") pod \"ceilometer-0\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " pod="openstack/ceilometer-0"
Nov 28 15:42:36 crc kubenswrapper[4884]: I1128 15:42:36.968882 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:42:37 crc kubenswrapper[4884]: I1128 15:42:37.449533 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:42:37 crc kubenswrapper[4884]: I1128 15:42:37.513394 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerStarted","Data":"8ac198a1f19f272fe722c833bbbabb60f04f7a3c57b8ed0b4efff8b89b67e70e"}
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.523208 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerStarted","Data":"0d6e7d4debd6dea51db2ae141abf236c3c84e642874ebb5144481a4940d3e1a9"}
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.523259 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.523780 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.523266 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.523884 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.540663 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.557864 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.578675 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 15:42:38 crc kubenswrapper[4884]: I1128 15:42:38.784846 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.531081 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-dhdfs"]
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.533014 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dhdfs"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.547602 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-dhdfs"]
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.555175 4884 generic.go:334] "Generic (PLEG): container finished" podID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerID="f9802f280f0524d6a7af18d87d664c767e19efd621585320045209b91d143e3d" exitCode=0
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.555255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-657c567946-l5n4g" event={"ID":"300c7172-b015-47a7-b3cd-52b5e67fdf64","Type":"ContainerDied","Data":"f9802f280f0524d6a7af18d87d664c767e19efd621585320045209b91d143e3d"}
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.562847 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerStarted","Data":"8901fb47be9428d8c026398b471281119d553221e4e2c086ef486c1a68321c6b"}
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.563651 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgrrw\" (UniqueName: \"kubernetes.io/projected/47ca75c0-343b-4cef-82d6-24c20f014435-kube-api-access-dgrrw\") pod \"nova-api-db-create-dhdfs\" (UID: \"47ca75c0-343b-4cef-82d6-24c20f014435\") " pod="openstack/nova-api-db-create-dhdfs"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.627210 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-zfz9w"]
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.628738 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zfz9w"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.636751 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-zfz9w"]
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.668857 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgrrw\" (UniqueName: \"kubernetes.io/projected/47ca75c0-343b-4cef-82d6-24c20f014435-kube-api-access-dgrrw\") pod \"nova-api-db-create-dhdfs\" (UID: \"47ca75c0-343b-4cef-82d6-24c20f014435\") " pod="openstack/nova-api-db-create-dhdfs"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.722882 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgrrw\" (UniqueName: \"kubernetes.io/projected/47ca75c0-343b-4cef-82d6-24c20f014435-kube-api-access-dgrrw\") pod \"nova-api-db-create-dhdfs\" (UID: \"47ca75c0-343b-4cef-82d6-24c20f014435\") " pod="openstack/nova-api-db-create-dhdfs"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.775163 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxjrn\" (UniqueName: \"kubernetes.io/projected/b6c45f17-c6a2-4e48-8f4b-507043c949c0-kube-api-access-hxjrn\") pod \"nova-cell0-db-create-zfz9w\" (UID: \"b6c45f17-c6a2-4e48-8f4b-507043c949c0\") " pod="openstack/nova-cell0-db-create-zfz9w"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.777839 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-6kmhj"]
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.780593 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-6kmhj"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.792351 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-6kmhj"]
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.810023 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-657c567946-l5n4g"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.861611 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dhdfs"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.881383 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2p6d\" (UniqueName: \"kubernetes.io/projected/f7a1b531-4dbd-481a-a256-47393a6f53c5-kube-api-access-z2p6d\") pod \"nova-cell1-db-create-6kmhj\" (UID: \"f7a1b531-4dbd-481a-a256-47393a6f53c5\") " pod="openstack/nova-cell1-db-create-6kmhj"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.881481 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxjrn\" (UniqueName: \"kubernetes.io/projected/b6c45f17-c6a2-4e48-8f4b-507043c949c0-kube-api-access-hxjrn\") pod \"nova-cell0-db-create-zfz9w\" (UID: \"b6c45f17-c6a2-4e48-8f4b-507043c949c0\") " pod="openstack/nova-cell0-db-create-zfz9w"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.926643 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxjrn\" (UniqueName: \"kubernetes.io/projected/b6c45f17-c6a2-4e48-8f4b-507043c949c0-kube-api-access-hxjrn\") pod \"nova-cell0-db-create-zfz9w\" (UID: \"b6c45f17-c6a2-4e48-8f4b-507043c949c0\") " pod="openstack/nova-cell0-db-create-zfz9w"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.981818 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zfz9w"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.983725 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhpmp\" (UniqueName: \"kubernetes.io/projected/300c7172-b015-47a7-b3cd-52b5e67fdf64-kube-api-access-xhpmp\") pod \"300c7172-b015-47a7-b3cd-52b5e67fdf64\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") "
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.984212 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-config\") pod \"300c7172-b015-47a7-b3cd-52b5e67fdf64\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") "
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.984470 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-ovndb-tls-certs\") pod \"300c7172-b015-47a7-b3cd-52b5e67fdf64\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") "
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.984550 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-combined-ca-bundle\") pod \"300c7172-b015-47a7-b3cd-52b5e67fdf64\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") "
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.984586 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-httpd-config\") pod \"300c7172-b015-47a7-b3cd-52b5e67fdf64\" (UID: \"300c7172-b015-47a7-b3cd-52b5e67fdf64\") "
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.985133 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2p6d\" (UniqueName: \"kubernetes.io/projected/f7a1b531-4dbd-481a-a256-47393a6f53c5-kube-api-access-z2p6d\") pod \"nova-cell1-db-create-6kmhj\" (UID: \"f7a1b531-4dbd-481a-a256-47393a6f53c5\") " pod="openstack/nova-cell1-db-create-6kmhj"
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.993580 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "300c7172-b015-47a7-b3cd-52b5e67fdf64" (UID: "300c7172-b015-47a7-b3cd-52b5e67fdf64"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:39 crc kubenswrapper[4884]: I1128 15:42:39.995734 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/300c7172-b015-47a7-b3cd-52b5e67fdf64-kube-api-access-xhpmp" (OuterVolumeSpecName: "kube-api-access-xhpmp") pod "300c7172-b015-47a7-b3cd-52b5e67fdf64" (UID: "300c7172-b015-47a7-b3cd-52b5e67fdf64"). InnerVolumeSpecName "kube-api-access-xhpmp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.025041 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2p6d\" (UniqueName: \"kubernetes.io/projected/f7a1b531-4dbd-481a-a256-47393a6f53c5-kube-api-access-z2p6d\") pod \"nova-cell1-db-create-6kmhj\" (UID: \"f7a1b531-4dbd-481a-a256-47393a6f53c5\") " pod="openstack/nova-cell1-db-create-6kmhj"
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.074563 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "300c7172-b015-47a7-b3cd-52b5e67fdf64" (UID: "300c7172-b015-47a7-b3cd-52b5e67fdf64"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.080252 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-config" (OuterVolumeSpecName: "config") pod "300c7172-b015-47a7-b3cd-52b5e67fdf64" (UID: "300c7172-b015-47a7-b3cd-52b5e67fdf64"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.093121 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.093148 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.093159 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhpmp\" (UniqueName: \"kubernetes.io/projected/300c7172-b015-47a7-b3cd-52b5e67fdf64-kube-api-access-xhpmp\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.093183 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.136241 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "300c7172-b015-47a7-b3cd-52b5e67fdf64" (UID: "300c7172-b015-47a7-b3cd-52b5e67fdf64"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.151534 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-6kmhj"
Need to start a new one" pod="openstack/nova-cell1-db-create-6kmhj" Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.196127 4884 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/300c7172-b015-47a7-b3cd-52b5e67fdf64-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.363303 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-dhdfs"] Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.539057 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-zfz9w"] Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.584215 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerStarted","Data":"d692d93d96cf0e8702679a32e20bbfedf4ba4c5216ba2f9015b37652a588475b"} Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.587004 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-zfz9w" event={"ID":"b6c45f17-c6a2-4e48-8f4b-507043c949c0","Type":"ContainerStarted","Data":"104c2e692c8925da45ab2eff334ce36c1e2fc8b7074c15f1c3055ba250e892d4"} Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.590028 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dhdfs" event={"ID":"47ca75c0-343b-4cef-82d6-24c20f014435","Type":"ContainerStarted","Data":"e76ece697bc20f9c1212326427e9ef6cf2785d7a427e496013967bb041fdd495"} Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.607816 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-657c567946-l5n4g" Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.612264 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-657c567946-l5n4g" event={"ID":"300c7172-b015-47a7-b3cd-52b5e67fdf64","Type":"ContainerDied","Data":"bde0abbf23b9fb0c53261fd85ff2eacf8d488c5a15cbb24f2a8f83a61d7faee4"} Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.612340 4884 scope.go:117] "RemoveContainer" containerID="1b21629c9f6a6e80ef6dd9b05cf79286957a70256a474418362e2ecb678ee394" Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.647144 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-657c567946-l5n4g"] Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.654611 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-657c567946-l5n4g"] Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.660799 4884 scope.go:117] "RemoveContainer" containerID="f9802f280f0524d6a7af18d87d664c767e19efd621585320045209b91d143e3d" Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.711779 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" path="/var/lib/kubelet/pods/300c7172-b015-47a7-b3cd-52b5e67fdf64/volumes" Nov 28 15:42:40 crc kubenswrapper[4884]: I1128 15:42:40.712346 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-6kmhj"] Nov 28 15:42:40 crc kubenswrapper[4884]: W1128 15:42:40.741653 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7a1b531_4dbd_481a_a256_47393a6f53c5.slice/crio-819c383d0036bf448d98a1872f3e3f0d1b37967dd2f829b0264b47f762b47d03 WatchSource:0}: Error finding container 
819c383d0036bf448d98a1872f3e3f0d1b37967dd2f829b0264b47f762b47d03: Status 404 returned error can't find the container with id 819c383d0036bf448d98a1872f3e3f0d1b37967dd2f829b0264b47f762b47d03 Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.623824 4884 generic.go:334] "Generic (PLEG): container finished" podID="b6c45f17-c6a2-4e48-8f4b-507043c949c0" containerID="e629d92bbf1f5998319592b655efddbcc1184d06484b8cc9eedf20db4d29406b" exitCode=0 Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.623894 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-zfz9w" event={"ID":"b6c45f17-c6a2-4e48-8f4b-507043c949c0","Type":"ContainerDied","Data":"e629d92bbf1f5998319592b655efddbcc1184d06484b8cc9eedf20db4d29406b"} Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.626031 4884 generic.go:334] "Generic (PLEG): container finished" podID="47ca75c0-343b-4cef-82d6-24c20f014435" containerID="364c339d6ef9ee657fe476484782c8687c2924daa4dd5ab62181131d3568777e" exitCode=0 Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.626078 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dhdfs" event={"ID":"47ca75c0-343b-4cef-82d6-24c20f014435","Type":"ContainerDied","Data":"364c339d6ef9ee657fe476484782c8687c2924daa4dd5ab62181131d3568777e"} Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.631669 4884 generic.go:334] "Generic (PLEG): container finished" podID="f7a1b531-4dbd-481a-a256-47393a6f53c5" containerID="a88799f33ef691b4ee8915163ec9d18f0972fe6e1536003ad001881fb31dca5f" exitCode=0 Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.631729 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-6kmhj" event={"ID":"f7a1b531-4dbd-481a-a256-47393a6f53c5","Type":"ContainerDied","Data":"a88799f33ef691b4ee8915163ec9d18f0972fe6e1536003ad001881fb31dca5f"} Nov 28 15:42:41 crc kubenswrapper[4884]: I1128 15:42:41.631757 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-6kmhj" event={"ID":"f7a1b531-4dbd-481a-a256-47393a6f53c5","Type":"ContainerStarted","Data":"819c383d0036bf448d98a1872f3e3f0d1b37967dd2f829b0264b47f762b47d03"} Nov 28 15:42:42 crc kubenswrapper[4884]: I1128 15:42:42.647570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerStarted","Data":"244987867fae530da3d0660c743782d71a8412ba55cb8a8652f471075131e415"} Nov 28 15:42:42 crc kubenswrapper[4884]: I1128 15:42:42.648054 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:42:42 crc kubenswrapper[4884]: I1128 15:42:42.671480 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.654527462 podStartE2EDuration="6.671460266s" podCreationTimestamp="2025-11-28 15:42:36 +0000 UTC" firstStartedPulling="2025-11-28 15:42:37.457430192 +0000 UTC m=+1397.020213993" lastFinishedPulling="2025-11-28 15:42:41.474362996 +0000 UTC m=+1401.037146797" observedRunningTime="2025-11-28 15:42:42.667015509 +0000 UTC m=+1402.229799310" watchObservedRunningTime="2025-11-28 15:42:42.671460266 +0000 UTC m=+1402.234244087" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.177037 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-6kmhj" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.185368 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zfz9w" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.192365 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dhdfs" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.273890 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2p6d\" (UniqueName: \"kubernetes.io/projected/f7a1b531-4dbd-481a-a256-47393a6f53c5-kube-api-access-z2p6d\") pod \"f7a1b531-4dbd-481a-a256-47393a6f53c5\" (UID: \"f7a1b531-4dbd-481a-a256-47393a6f53c5\") " Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.273987 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxjrn\" (UniqueName: \"kubernetes.io/projected/b6c45f17-c6a2-4e48-8f4b-507043c949c0-kube-api-access-hxjrn\") pod \"b6c45f17-c6a2-4e48-8f4b-507043c949c0\" (UID: \"b6c45f17-c6a2-4e48-8f4b-507043c949c0\") " Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.274058 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgrrw\" (UniqueName: \"kubernetes.io/projected/47ca75c0-343b-4cef-82d6-24c20f014435-kube-api-access-dgrrw\") pod \"47ca75c0-343b-4cef-82d6-24c20f014435\" (UID: \"47ca75c0-343b-4cef-82d6-24c20f014435\") " Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.279573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7a1b531-4dbd-481a-a256-47393a6f53c5-kube-api-access-z2p6d" (OuterVolumeSpecName: "kube-api-access-z2p6d") pod "f7a1b531-4dbd-481a-a256-47393a6f53c5" (UID: "f7a1b531-4dbd-481a-a256-47393a6f53c5"). InnerVolumeSpecName "kube-api-access-z2p6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.280510 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47ca75c0-343b-4cef-82d6-24c20f014435-kube-api-access-dgrrw" (OuterVolumeSpecName: "kube-api-access-dgrrw") pod "47ca75c0-343b-4cef-82d6-24c20f014435" (UID: "47ca75c0-343b-4cef-82d6-24c20f014435"). InnerVolumeSpecName "kube-api-access-dgrrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.288443 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6c45f17-c6a2-4e48-8f4b-507043c949c0-kube-api-access-hxjrn" (OuterVolumeSpecName: "kube-api-access-hxjrn") pod "b6c45f17-c6a2-4e48-8f4b-507043c949c0" (UID: "b6c45f17-c6a2-4e48-8f4b-507043c949c0"). InnerVolumeSpecName "kube-api-access-hxjrn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.376860 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2p6d\" (UniqueName: \"kubernetes.io/projected/f7a1b531-4dbd-481a-a256-47393a6f53c5-kube-api-access-z2p6d\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.376908 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxjrn\" (UniqueName: \"kubernetes.io/projected/b6c45f17-c6a2-4e48-8f4b-507043c949c0-kube-api-access-hxjrn\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.376929 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgrrw\" (UniqueName: \"kubernetes.io/projected/47ca75c0-343b-4cef-82d6-24c20f014435-kube-api-access-dgrrw\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.657042 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-zfz9w" event={"ID":"b6c45f17-c6a2-4e48-8f4b-507043c949c0","Type":"ContainerDied","Data":"104c2e692c8925da45ab2eff334ce36c1e2fc8b7074c15f1c3055ba250e892d4"} Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.658261 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="104c2e692c8925da45ab2eff334ce36c1e2fc8b7074c15f1c3055ba250e892d4" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.657057 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zfz9w" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.658602 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-dhdfs" event={"ID":"47ca75c0-343b-4cef-82d6-24c20f014435","Type":"ContainerDied","Data":"e76ece697bc20f9c1212326427e9ef6cf2785d7a427e496013967bb041fdd495"} Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.658638 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-dhdfs" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.658633 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e76ece697bc20f9c1212326427e9ef6cf2785d7a427e496013967bb041fdd495" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.661362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-6kmhj" event={"ID":"f7a1b531-4dbd-481a-a256-47393a6f53c5","Type":"ContainerDied","Data":"819c383d0036bf448d98a1872f3e3f0d1b37967dd2f829b0264b47f762b47d03"} Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.661388 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="819c383d0036bf448d98a1872f3e3f0d1b37967dd2f829b0264b47f762b47d03" Nov 28 15:42:43 crc kubenswrapper[4884]: I1128 15:42:43.661373 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-6kmhj" Nov 28 15:42:48 crc kubenswrapper[4884]: I1128 15:42:48.782157 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:48 crc kubenswrapper[4884]: I1128 15:42:48.783051 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-central-agent" containerID="cri-o://0d6e7d4debd6dea51db2ae141abf236c3c84e642874ebb5144481a4940d3e1a9" gracePeriod=30 Nov 28 15:42:48 crc kubenswrapper[4884]: I1128 15:42:48.783170 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="proxy-httpd" containerID="cri-o://244987867fae530da3d0660c743782d71a8412ba55cb8a8652f471075131e415" gracePeriod=30 Nov 28 15:42:48 crc kubenswrapper[4884]: I1128 15:42:48.783221 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="sg-core" containerID="cri-o://d692d93d96cf0e8702679a32e20bbfedf4ba4c5216ba2f9015b37652a588475b" gracePeriod=30 Nov 28 15:42:48 crc kubenswrapper[4884]: I1128 15:42:48.783267 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-notification-agent" containerID="cri-o://8901fb47be9428d8c026398b471281119d553221e4e2c086ef486c1a68321c6b" gracePeriod=30 Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.749198 4884 generic.go:334] "Generic (PLEG): container finished" podID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerID="244987867fae530da3d0660c743782d71a8412ba55cb8a8652f471075131e415" exitCode=0 Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.749772 4884 generic.go:334] "Generic (PLEG): container finished" podID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerID="d692d93d96cf0e8702679a32e20bbfedf4ba4c5216ba2f9015b37652a588475b" exitCode=2 Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.749880 4884 generic.go:334] "Generic (PLEG): container finished" podID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerID="8901fb47be9428d8c026398b471281119d553221e4e2c086ef486c1a68321c6b" exitCode=0 Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.749968 4884 generic.go:334] "Generic (PLEG): container finished" podID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerID="0d6e7d4debd6dea51db2ae141abf236c3c84e642874ebb5144481a4940d3e1a9" exitCode=0 Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.749231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerDied","Data":"244987867fae530da3d0660c743782d71a8412ba55cb8a8652f471075131e415"} Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.750213 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerDied","Data":"d692d93d96cf0e8702679a32e20bbfedf4ba4c5216ba2f9015b37652a588475b"} Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.750303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerDied","Data":"8901fb47be9428d8c026398b471281119d553221e4e2c086ef486c1a68321c6b"} Nov 28 15:42:49 crc 
kubenswrapper[4884]: I1128 15:42:49.750365 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerDied","Data":"0d6e7d4debd6dea51db2ae141abf236c3c84e642874ebb5144481a4940d3e1a9"} Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.760620 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2f46-account-create-jv9gr"] Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.761146 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6c45f17-c6a2-4e48-8f4b-507043c949c0" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761219 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6c45f17-c6a2-4e48-8f4b-507043c949c0" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.761293 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7a1b531-4dbd-481a-a256-47393a6f53c5" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761343 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7a1b531-4dbd-481a-a256-47393a6f53c5" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.761412 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-httpd" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761472 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-httpd" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.761540 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-api" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761590 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-api" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.761647 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47ca75c0-343b-4cef-82d6-24c20f014435" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761698 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="47ca75c0-343b-4cef-82d6-24c20f014435" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761911 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="47ca75c0-343b-4cef-82d6-24c20f014435" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.761985 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7a1b531-4dbd-481a-a256-47393a6f53c5" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.762128 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-api" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.762218 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="300c7172-b015-47a7-b3cd-52b5e67fdf64" containerName="neutron-httpd" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.762280 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6c45f17-c6a2-4e48-8f4b-507043c949c0" containerName="mariadb-database-create" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 
15:42:49.764838 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.768674 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.771249 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2f46-account-create-jv9gr"] Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.833795 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896174 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-log-httpd\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896237 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wscj7\" (UniqueName: \"kubernetes.io/projected/c656f30f-d239-443e-95bf-1bab91ef52c5-kube-api-access-wscj7\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896328 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-config-data\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896434 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-run-httpd\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896479 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-combined-ca-bundle\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896508 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-scripts\") pod \"c656f30f-d239-443e-95bf-1bab91ef52c5\" (UID: \"c656f30f-d239-443e-95bf-1bab91ef52c5\") " Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.896794 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq8zk\" (UniqueName: \"kubernetes.io/projected/ac05886e-6151-4d56-92dd-093c27c7c955-kube-api-access-gq8zk\") pod \"nova-api-2f46-account-create-jv9gr\" (UID: \"ac05886e-6151-4d56-92dd-093c27c7c955\") " pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:49 
crc kubenswrapper[4884]: I1128 15:42:49.897535 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.898415 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.905259 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-scripts" (OuterVolumeSpecName: "scripts") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.905380 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c656f30f-d239-443e-95bf-1bab91ef52c5-kube-api-access-wscj7" (OuterVolumeSpecName: "kube-api-access-wscj7") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "kube-api-access-wscj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.955339 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-992b-account-create-j5mtz"] Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.956165 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="sg-core" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.956302 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="sg-core" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.956527 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-notification-agent" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.956622 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-notification-agent" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.956734 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-central-agent" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.956822 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-central-agent" Nov 28 15:42:49 crc kubenswrapper[4884]: E1128 15:42:49.957073 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="proxy-httpd" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.957239 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="proxy-httpd" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 
15:42:49.957592 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-notification-agent" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.957703 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="proxy-httpd" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.957806 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="sg-core" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.957945 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" containerName="ceilometer-central-agent" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.959968 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.962952 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.970136 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-992b-account-create-j5mtz"] Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.975362 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.999473 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq8zk\" (UniqueName: \"kubernetes.io/projected/ac05886e-6151-4d56-92dd-093c27c7c955-kube-api-access-gq8zk\") pod \"nova-api-2f46-account-create-jv9gr\" (UID: \"ac05886e-6151-4d56-92dd-093c27c7c955\") " pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.999649 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.999663 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.999673 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c656f30f-d239-443e-95bf-1bab91ef52c5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.999684 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:49 crc kubenswrapper[4884]: I1128 15:42:49.999697 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wscj7\" (UniqueName: \"kubernetes.io/projected/c656f30f-d239-443e-95bf-1bab91ef52c5-kube-api-access-wscj7\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.009575 4884 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.018790 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq8zk\" (UniqueName: \"kubernetes.io/projected/ac05886e-6151-4d56-92dd-093c27c7c955-kube-api-access-gq8zk\") pod \"nova-api-2f46-account-create-jv9gr\" (UID: \"ac05886e-6151-4d56-92dd-093c27c7c955\") " pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.031389 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-config-data" (OuterVolumeSpecName: "config-data") pod "c656f30f-d239-443e-95bf-1bab91ef52c5" (UID: "c656f30f-d239-443e-95bf-1bab91ef52c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.101624 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7szc6\" (UniqueName: \"kubernetes.io/projected/a6386f1f-20c4-4856-8171-064b53c9fa7f-kube-api-access-7szc6\") pod \"nova-cell0-992b-account-create-j5mtz\" (UID: \"a6386f1f-20c4-4856-8171-064b53c9fa7f\") " pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.102081 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.102140 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c656f30f-d239-443e-95bf-1bab91ef52c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.136256 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.152214 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-c1f2-account-create-vf7lg"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.154076 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.157113 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.161032 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-c1f2-account-create-vf7lg"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.203414 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7szc6\" (UniqueName: \"kubernetes.io/projected/a6386f1f-20c4-4856-8171-064b53c9fa7f-kube-api-access-7szc6\") pod \"nova-cell0-992b-account-create-j5mtz\" (UID: \"a6386f1f-20c4-4856-8171-064b53c9fa7f\") " pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.219935 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7szc6\" (UniqueName: \"kubernetes.io/projected/a6386f1f-20c4-4856-8171-064b53c9fa7f-kube-api-access-7szc6\") pod \"nova-cell0-992b-account-create-j5mtz\" (UID: \"a6386f1f-20c4-4856-8171-064b53c9fa7f\") " pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.304687 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r54zc\" (UniqueName: \"kubernetes.io/projected/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878-kube-api-access-r54zc\") pod \"nova-cell1-c1f2-account-create-vf7lg\" (UID: \"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878\") " pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.406156 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r54zc\" (UniqueName: \"kubernetes.io/projected/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878-kube-api-access-r54zc\") pod \"nova-cell1-c1f2-account-create-vf7lg\" (UID: \"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878\") " pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.415556 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.422719 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r54zc\" (UniqueName: \"kubernetes.io/projected/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878-kube-api-access-r54zc\") pod \"nova-cell1-c1f2-account-create-vf7lg\" (UID: \"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878\") " pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.496932 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.642994 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2f46-account-create-jv9gr"] Nov 28 15:42:50 crc kubenswrapper[4884]: W1128 15:42:50.650464 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac05886e_6151_4d56_92dd_093c27c7c955.slice/crio-cfe123b71877dfa8e4ed9ae2064181ffe1c53c2519f4c8742d6b8402eb8cc0d1 WatchSource:0}: Error finding container cfe123b71877dfa8e4ed9ae2064181ffe1c53c2519f4c8742d6b8402eb8cc0d1: Status 404 returned error can't find the container with id cfe123b71877dfa8e4ed9ae2064181ffe1c53c2519f4c8742d6b8402eb8cc0d1 Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.781760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f46-account-create-jv9gr" event={"ID":"ac05886e-6151-4d56-92dd-093c27c7c955","Type":"ContainerStarted","Data":"cfe123b71877dfa8e4ed9ae2064181ffe1c53c2519f4c8742d6b8402eb8cc0d1"} Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.784855 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c656f30f-d239-443e-95bf-1bab91ef52c5","Type":"ContainerDied","Data":"8ac198a1f19f272fe722c833bbbabb60f04f7a3c57b8ed0b4efff8b89b67e70e"} Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.784891 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.784938 4884 scope.go:117] "RemoveContainer" containerID="244987867fae530da3d0660c743782d71a8412ba55cb8a8652f471075131e415" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.808687 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.817689 4884 scope.go:117] "RemoveContainer" containerID="d692d93d96cf0e8702679a32e20bbfedf4ba4c5216ba2f9015b37652a588475b" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.819354 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.832588 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.846668 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.848576 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.848829 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.855493 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.858176 4884 scope.go:117] "RemoveContainer" containerID="8901fb47be9428d8c026398b471281119d553221e4e2c086ef486c1a68321c6b" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.869209 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-992b-account-create-j5mtz"] Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.914672 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-log-httpd\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.914826 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zwxf\" (UniqueName: \"kubernetes.io/projected/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-kube-api-access-2zwxf\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.914930 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-scripts\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.915009 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-run-httpd\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.915082 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.915150 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.915233 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-config-data\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.919658 
4884 scope.go:117] "RemoveContainer" containerID="0d6e7d4debd6dea51db2ae141abf236c3c84e642874ebb5144481a4940d3e1a9" Nov 28 15:42:50 crc kubenswrapper[4884]: W1128 15:42:50.968676 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9a69be8_8e9e_45b3_ab4d_b8f9a148e878.slice/crio-779da14908c91770177d2202fe6cd705a16c2de7997bec787ec53e3d34e588ce WatchSource:0}: Error finding container 779da14908c91770177d2202fe6cd705a16c2de7997bec787ec53e3d34e588ce: Status 404 returned error can't find the container with id 779da14908c91770177d2202fe6cd705a16c2de7997bec787ec53e3d34e588ce Nov 28 15:42:50 crc kubenswrapper[4884]: I1128 15:42:50.976529 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-c1f2-account-create-vf7lg"] Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016557 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-scripts\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016610 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-run-httpd\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016638 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016692 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-config-data\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-log-httpd\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.016843 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zwxf\" (UniqueName: \"kubernetes.io/projected/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-kube-api-access-2zwxf\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.019637 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-run-httpd\") pod 
\"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.019713 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-log-httpd\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.023812 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.024062 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-scripts\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.024454 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-config-data\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.025734 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.032639 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zwxf\" (UniqueName: \"kubernetes.io/projected/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-kube-api-access-2zwxf\") pod \"ceilometer-0\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.176274 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.663409 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:42:51 crc kubenswrapper[4884]: W1128 15:42:51.676407 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8fe7b16_7b70_4153_85b1_acbbfbb9f30a.slice/crio-20080fbf6f161097292f8e6bb9cbd71547033b8ef4b5600b40a96d6737ab1d8c WatchSource:0}: Error finding container 20080fbf6f161097292f8e6bb9cbd71547033b8ef4b5600b40a96d6737ab1d8c: Status 404 returned error can't find the container with id 20080fbf6f161097292f8e6bb9cbd71547033b8ef4b5600b40a96d6737ab1d8c Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.814267 4884 generic.go:334] "Generic (PLEG): container finished" podID="ac05886e-6151-4d56-92dd-093c27c7c955" containerID="7a2d5c88d65fad02b9525e7a41b69b86653082bc1c05fe974f5231b1c252b22a" exitCode=0 Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.814496 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f46-account-create-jv9gr" event={"ID":"ac05886e-6151-4d56-92dd-093c27c7c955","Type":"ContainerDied","Data":"7a2d5c88d65fad02b9525e7a41b69b86653082bc1c05fe974f5231b1c252b22a"} Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.818453 4884 generic.go:334] "Generic (PLEG): container finished" podID="e9a69be8-8e9e-45b3-ab4d-b8f9a148e878" containerID="d6c08e0e8c4eb6f4610c81be9b7d6484ee5b9c70ede940abe57165a86cb85e5e" exitCode=0 Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.818609 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" event={"ID":"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878","Type":"ContainerDied","Data":"d6c08e0e8c4eb6f4610c81be9b7d6484ee5b9c70ede940abe57165a86cb85e5e"} Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.818773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" event={"ID":"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878","Type":"ContainerStarted","Data":"779da14908c91770177d2202fe6cd705a16c2de7997bec787ec53e3d34e588ce"} Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.821285 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerStarted","Data":"20080fbf6f161097292f8e6bb9cbd71547033b8ef4b5600b40a96d6737ab1d8c"} Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.834004 4884 generic.go:334] "Generic (PLEG): container finished" podID="a6386f1f-20c4-4856-8171-064b53c9fa7f" containerID="db66a63c6b73949a4a381fd8c6a6996fc1d34737761429ce5548fc962cd511cd" exitCode=0 Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.834114 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-992b-account-create-j5mtz" event={"ID":"a6386f1f-20c4-4856-8171-064b53c9fa7f","Type":"ContainerDied","Data":"db66a63c6b73949a4a381fd8c6a6996fc1d34737761429ce5548fc962cd511cd"} Nov 28 15:42:51 crc kubenswrapper[4884]: I1128 15:42:51.834165 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-992b-account-create-j5mtz" event={"ID":"a6386f1f-20c4-4856-8171-064b53c9fa7f","Type":"ContainerStarted","Data":"791789de23988db8e3c240863c5574fe00724d1dbaf55370745ea670cbb6f700"} Nov 28 15:42:52 crc kubenswrapper[4884]: I1128 15:42:52.704535 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="c656f30f-d239-443e-95bf-1bab91ef52c5" path="/var/lib/kubelet/pods/c656f30f-d239-443e-95bf-1bab91ef52c5/volumes" Nov 28 15:42:52 crc kubenswrapper[4884]: I1128 15:42:52.844275 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerStarted","Data":"29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8"} Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.271270 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.292990 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.302358 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.369226 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq8zk\" (UniqueName: \"kubernetes.io/projected/ac05886e-6151-4d56-92dd-093c27c7c955-kube-api-access-gq8zk\") pod \"ac05886e-6151-4d56-92dd-093c27c7c955\" (UID: \"ac05886e-6151-4d56-92dd-093c27c7c955\") " Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.369727 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r54zc\" (UniqueName: \"kubernetes.io/projected/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878-kube-api-access-r54zc\") pod \"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878\" (UID: \"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878\") " Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.369797 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7szc6\" (UniqueName: \"kubernetes.io/projected/a6386f1f-20c4-4856-8171-064b53c9fa7f-kube-api-access-7szc6\") pod \"a6386f1f-20c4-4856-8171-064b53c9fa7f\" (UID: \"a6386f1f-20c4-4856-8171-064b53c9fa7f\") " Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.374675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6386f1f-20c4-4856-8171-064b53c9fa7f-kube-api-access-7szc6" (OuterVolumeSpecName: "kube-api-access-7szc6") pod "a6386f1f-20c4-4856-8171-064b53c9fa7f" (UID: "a6386f1f-20c4-4856-8171-064b53c9fa7f"). InnerVolumeSpecName "kube-api-access-7szc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.374883 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878-kube-api-access-r54zc" (OuterVolumeSpecName: "kube-api-access-r54zc") pod "e9a69be8-8e9e-45b3-ab4d-b8f9a148e878" (UID: "e9a69be8-8e9e-45b3-ab4d-b8f9a148e878"). InnerVolumeSpecName "kube-api-access-r54zc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.375542 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac05886e-6151-4d56-92dd-093c27c7c955-kube-api-access-gq8zk" (OuterVolumeSpecName: "kube-api-access-gq8zk") pod "ac05886e-6151-4d56-92dd-093c27c7c955" (UID: "ac05886e-6151-4d56-92dd-093c27c7c955"). InnerVolumeSpecName "kube-api-access-gq8zk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.470975 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7szc6\" (UniqueName: \"kubernetes.io/projected/a6386f1f-20c4-4856-8171-064b53c9fa7f-kube-api-access-7szc6\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.471004 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq8zk\" (UniqueName: \"kubernetes.io/projected/ac05886e-6151-4d56-92dd-093c27c7c955-kube-api-access-gq8zk\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.471014 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r54zc\" (UniqueName: \"kubernetes.io/projected/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878-kube-api-access-r54zc\") on node \"crc\" DevicePath \"\"" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.858275 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-992b-account-create-j5mtz" event={"ID":"a6386f1f-20c4-4856-8171-064b53c9fa7f","Type":"ContainerDied","Data":"791789de23988db8e3c240863c5574fe00724d1dbaf55370745ea670cbb6f700"} Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.858363 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="791789de23988db8e3c240863c5574fe00724d1dbaf55370745ea670cbb6f700" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.860072 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2f46-account-create-jv9gr" event={"ID":"ac05886e-6151-4d56-92dd-093c27c7c955","Type":"ContainerDied","Data":"cfe123b71877dfa8e4ed9ae2064181ffe1c53c2519f4c8742d6b8402eb8cc0d1"} Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.860130 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2f46-account-create-jv9gr" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.860152 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cfe123b71877dfa8e4ed9ae2064181ffe1c53c2519f4c8742d6b8402eb8cc0d1" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.860428 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-992b-account-create-j5mtz" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.862030 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" event={"ID":"e9a69be8-8e9e-45b3-ab4d-b8f9a148e878","Type":"ContainerDied","Data":"779da14908c91770177d2202fe6cd705a16c2de7997bec787ec53e3d34e588ce"} Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.862137 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="779da14908c91770177d2202fe6cd705a16c2de7997bec787ec53e3d34e588ce" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.862067 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-c1f2-account-create-vf7lg" Nov 28 15:42:53 crc kubenswrapper[4884]: I1128 15:42:53.863959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerStarted","Data":"49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6"} Nov 28 15:42:54 crc kubenswrapper[4884]: I1128 15:42:54.553573 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.150:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:42:54 crc kubenswrapper[4884]: I1128 15:42:54.553649 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="9af26263-cd93-4081-bdcd-518ad0587028" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.150:9292/healthcheck\": dial tcp 10.217.0.150:9292: i/o timeout" Nov 28 15:42:54 crc kubenswrapper[4884]: I1128 15:42:54.876036 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerStarted","Data":"4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e"} Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210073 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wgxnl"] Nov 28 15:42:55 crc kubenswrapper[4884]: E1128 15:42:55.210471 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac05886e-6151-4d56-92dd-093c27c7c955" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210487 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac05886e-6151-4d56-92dd-093c27c7c955" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: E1128 15:42:55.210506 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9a69be8-8e9e-45b3-ab4d-b8f9a148e878" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210514 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9a69be8-8e9e-45b3-ab4d-b8f9a148e878" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: E1128 15:42:55.210533 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6386f1f-20c4-4856-8171-064b53c9fa7f" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210539 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6386f1f-20c4-4856-8171-064b53c9fa7f" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210709 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9a69be8-8e9e-45b3-ab4d-b8f9a148e878" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210718 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6386f1f-20c4-4856-8171-064b53c9fa7f" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.210740 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac05886e-6151-4d56-92dd-093c27c7c955" containerName="mariadb-account-create" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.211324 4884 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.213359 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.213597 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-qlcfb" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.214341 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.217908 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wgxnl"] Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.305655 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.305708 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-config-data\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.305758 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkwvr\" (UniqueName: \"kubernetes.io/projected/cc0daf5c-fae9-4225-95bf-2fb62f2da934-kube-api-access-fkwvr\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.305775 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-scripts\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.407499 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.407549 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-config-data\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.407592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkwvr\" (UniqueName: \"kubernetes.io/projected/cc0daf5c-fae9-4225-95bf-2fb62f2da934-kube-api-access-fkwvr\") 
pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.407614 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-scripts\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.413154 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-scripts\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.413579 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-config-data\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.413803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.428484 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkwvr\" (UniqueName: \"kubernetes.io/projected/cc0daf5c-fae9-4225-95bf-2fb62f2da934-kube-api-access-fkwvr\") pod \"nova-cell0-conductor-db-sync-wgxnl\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.532678 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.886893 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerStarted","Data":"403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55"} Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.887644 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.912288 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9730295770000001 podStartE2EDuration="5.912264315s" podCreationTimestamp="2025-11-28 15:42:50 +0000 UTC" firstStartedPulling="2025-11-28 15:42:51.680161909 +0000 UTC m=+1411.242945740" lastFinishedPulling="2025-11-28 15:42:55.619396677 +0000 UTC m=+1415.182180478" observedRunningTime="2025-11-28 15:42:55.908844783 +0000 UTC m=+1415.471628594" watchObservedRunningTime="2025-11-28 15:42:55.912264315 +0000 UTC m=+1415.475048116" Nov 28 15:42:55 crc kubenswrapper[4884]: I1128 15:42:55.983846 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wgxnl"] Nov 28 15:42:55 crc kubenswrapper[4884]: W1128 15:42:55.984990 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc0daf5c_fae9_4225_95bf_2fb62f2da934.slice/crio-2f63505f522ad2666bd4d8bbb93dabfadbd9056880d717cac089a1f1b14a4245 WatchSource:0}: Error finding container 2f63505f522ad2666bd4d8bbb93dabfadbd9056880d717cac089a1f1b14a4245: Status 404 returned error can't find the container with id 2f63505f522ad2666bd4d8bbb93dabfadbd9056880d717cac089a1f1b14a4245 Nov 28 15:42:56 crc kubenswrapper[4884]: I1128 15:42:56.901181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" event={"ID":"cc0daf5c-fae9-4225-95bf-2fb62f2da934","Type":"ContainerStarted","Data":"2f63505f522ad2666bd4d8bbb93dabfadbd9056880d717cac089a1f1b14a4245"} Nov 28 15:43:06 crc kubenswrapper[4884]: I1128 15:43:06.030229 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" event={"ID":"cc0daf5c-fae9-4225-95bf-2fb62f2da934","Type":"ContainerStarted","Data":"078b8f4a791acff651b4144da1d83ca6c40df0796a420224b04ec8f504b1c85b"} Nov 28 15:43:06 crc kubenswrapper[4884]: I1128 15:43:06.069757 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" podStartSLOduration=2.102824137 podStartE2EDuration="11.069725901s" podCreationTimestamp="2025-11-28 15:42:55 +0000 UTC" firstStartedPulling="2025-11-28 15:42:55.986873705 +0000 UTC m=+1415.549657506" lastFinishedPulling="2025-11-28 15:43:04.953775469 +0000 UTC m=+1424.516559270" observedRunningTime="2025-11-28 15:43:06.054994365 +0000 UTC m=+1425.617778196" watchObservedRunningTime="2025-11-28 15:43:06.069725901 +0000 UTC m=+1425.632509742" Nov 28 15:43:15 crc kubenswrapper[4884]: I1128 15:43:15.130132 4884 generic.go:334] "Generic (PLEG): container finished" podID="cc0daf5c-fae9-4225-95bf-2fb62f2da934" containerID="078b8f4a791acff651b4144da1d83ca6c40df0796a420224b04ec8f504b1c85b" exitCode=0 Nov 28 15:43:15 crc kubenswrapper[4884]: I1128 15:43:15.130239 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-conductor-db-sync-wgxnl" event={"ID":"cc0daf5c-fae9-4225-95bf-2fb62f2da934","Type":"ContainerDied","Data":"078b8f4a791acff651b4144da1d83ca6c40df0796a420224b04ec8f504b1c85b"} Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.558636 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.637923 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-combined-ca-bundle\") pod \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.638005 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-config-data\") pod \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.638162 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkwvr\" (UniqueName: \"kubernetes.io/projected/cc0daf5c-fae9-4225-95bf-2fb62f2da934-kube-api-access-fkwvr\") pod \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.638228 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-scripts\") pod \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\" (UID: \"cc0daf5c-fae9-4225-95bf-2fb62f2da934\") " Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.646117 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-scripts" (OuterVolumeSpecName: "scripts") pod "cc0daf5c-fae9-4225-95bf-2fb62f2da934" (UID: "cc0daf5c-fae9-4225-95bf-2fb62f2da934"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.649040 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc0daf5c-fae9-4225-95bf-2fb62f2da934-kube-api-access-fkwvr" (OuterVolumeSpecName: "kube-api-access-fkwvr") pod "cc0daf5c-fae9-4225-95bf-2fb62f2da934" (UID: "cc0daf5c-fae9-4225-95bf-2fb62f2da934"). InnerVolumeSpecName "kube-api-access-fkwvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.668102 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc0daf5c-fae9-4225-95bf-2fb62f2da934" (UID: "cc0daf5c-fae9-4225-95bf-2fb62f2da934"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.670069 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-config-data" (OuterVolumeSpecName: "config-data") pod "cc0daf5c-fae9-4225-95bf-2fb62f2da934" (UID: "cc0daf5c-fae9-4225-95bf-2fb62f2da934"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.739805 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.739846 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.739855 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkwvr\" (UniqueName: \"kubernetes.io/projected/cc0daf5c-fae9-4225-95bf-2fb62f2da934-kube-api-access-fkwvr\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:16 crc kubenswrapper[4884]: I1128 15:43:16.739867 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc0daf5c-fae9-4225-95bf-2fb62f2da934-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.162562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" event={"ID":"cc0daf5c-fae9-4225-95bf-2fb62f2da934","Type":"ContainerDied","Data":"2f63505f522ad2666bd4d8bbb93dabfadbd9056880d717cac089a1f1b14a4245"} Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.162638 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f63505f522ad2666bd4d8bbb93dabfadbd9056880d717cac089a1f1b14a4245" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.162728 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-wgxnl" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.312488 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:43:17 crc kubenswrapper[4884]: E1128 15:43:17.313114 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc0daf5c-fae9-4225-95bf-2fb62f2da934" containerName="nova-cell0-conductor-db-sync" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.313146 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc0daf5c-fae9-4225-95bf-2fb62f2da934" containerName="nova-cell0-conductor-db-sync" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.313455 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc0daf5c-fae9-4225-95bf-2fb62f2da934" containerName="nova-cell0-conductor-db-sync" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.314382 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.317608 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-qlcfb" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.318464 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.332201 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.351911 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.352052 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htm6x\" (UniqueName: \"kubernetes.io/projected/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-kube-api-access-htm6x\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.352120 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.453251 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htm6x\" (UniqueName: \"kubernetes.io/projected/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-kube-api-access-htm6x\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.453359 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.453456 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.459055 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.459055 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.484317 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htm6x\" (UniqueName: \"kubernetes.io/projected/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-kube-api-access-htm6x\") pod \"nova-cell0-conductor-0\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:17 crc kubenswrapper[4884]: I1128 15:43:17.642039 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:18 crc kubenswrapper[4884]: I1128 15:43:18.187753 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:43:19 crc kubenswrapper[4884]: I1128 15:43:19.185858 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"fdd604f8-af2a-40bb-b85a-14d7a4eeb000","Type":"ContainerStarted","Data":"eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c"} Nov 28 15:43:19 crc kubenswrapper[4884]: I1128 15:43:19.186423 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:19 crc kubenswrapper[4884]: I1128 15:43:19.186442 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"fdd604f8-af2a-40bb-b85a-14d7a4eeb000","Type":"ContainerStarted","Data":"94ff87db0fa96609c40771cfd3ba38660175958b52bbe20173bd45d00e84082f"} Nov 28 15:43:19 crc kubenswrapper[4884]: I1128 15:43:19.226414 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.226387669 podStartE2EDuration="2.226387669s" podCreationTimestamp="2025-11-28 15:43:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:19.210894495 +0000 UTC m=+1438.773678296" watchObservedRunningTime="2025-11-28 15:43:19.226387669 +0000 UTC m=+1438.789171500" Nov 28 15:43:21 crc kubenswrapper[4884]: I1128 15:43:21.185189 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 15:43:25 crc kubenswrapper[4884]: I1128 15:43:25.611292 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:43:25 crc kubenswrapper[4884]: I1128 15:43:25.612007 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" containerName="kube-state-metrics" containerID="cri-o://33d3a0261db185faf83d34a47fdd9b6a51b9036f89e24846369b69f505429032" gracePeriod=30 Nov 28 15:43:26 crc kubenswrapper[4884]: I1128 15:43:26.256189 4884 generic.go:334] "Generic (PLEG): container finished" podID="bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" containerID="33d3a0261db185faf83d34a47fdd9b6a51b9036f89e24846369b69f505429032" exitCode=2 Nov 28 15:43:26 crc kubenswrapper[4884]: I1128 15:43:26.256251 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c","Type":"ContainerDied","Data":"33d3a0261db185faf83d34a47fdd9b6a51b9036f89e24846369b69f505429032"} Nov 28 15:43:26 crc kubenswrapper[4884]: I1128 15:43:26.703542 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:43:26 crc kubenswrapper[4884]: I1128 15:43:26.835344 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gdjj\" (UniqueName: \"kubernetes.io/projected/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c-kube-api-access-9gdjj\") pod \"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c\" (UID: \"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c\") " Nov 28 15:43:26 crc kubenswrapper[4884]: I1128 15:43:26.851268 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c-kube-api-access-9gdjj" (OuterVolumeSpecName: "kube-api-access-9gdjj") pod "bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" (UID: "bfe29bb1-1749-44e8-8a9a-a44a8e85e95c"). InnerVolumeSpecName "kube-api-access-9gdjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:26 crc kubenswrapper[4884]: I1128 15:43:26.939272 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gdjj\" (UniqueName: \"kubernetes.io/projected/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c-kube-api-access-9gdjj\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.267545 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"bfe29bb1-1749-44e8-8a9a-a44a8e85e95c","Type":"ContainerDied","Data":"35aaa2c4a815d508f202e3cacf560818d17aa894afd5e4ee436dc53cacf1b186"} Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.267808 4884 scope.go:117] "RemoveContainer" containerID="33d3a0261db185faf83d34a47fdd9b6a51b9036f89e24846369b69f505429032" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.268046 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.301694 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.309251 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.327536 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:43:27 crc kubenswrapper[4884]: E1128 15:43:27.327911 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" containerName="kube-state-metrics" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.327925 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" containerName="kube-state-metrics" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.328138 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" containerName="kube-state-metrics" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.328657 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.332601 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.333037 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.349374 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.418204 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.418640 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-central-agent" containerID="cri-o://29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8" gracePeriod=30 Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.418736 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="sg-core" containerID="cri-o://4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e" gracePeriod=30 Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.418754 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-notification-agent" containerID="cri-o://49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6" gracePeriod=30 Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.418910 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="proxy-httpd" containerID="cri-o://403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55" gracePeriod=30 Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.447590 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.447844 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7cg2\" (UniqueName: \"kubernetes.io/projected/585208a7-186b-40da-a7af-be303777e77c-kube-api-access-r7cg2\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.447956 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.448221 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.549710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.550063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.550578 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7cg2\" (UniqueName: \"kubernetes.io/projected/585208a7-186b-40da-a7af-be303777e77c-kube-api-access-r7cg2\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.550688 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.560883 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.577067 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.582151 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7cg2\" (UniqueName: \"kubernetes.io/projected/585208a7-186b-40da-a7af-be303777e77c-kube-api-access-r7cg2\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.586672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.649147 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:43:27 crc kubenswrapper[4884]: I1128 15:43:27.684796 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.128731 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.198069 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-chfqr"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.199191 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.202640 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.210155 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.210919 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-chfqr"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.280079 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerID="403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55" exitCode=0 Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.280135 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerID="4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e" exitCode=2 Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.280148 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerID="29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8" exitCode=0 Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.280234 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerDied","Data":"403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55"} Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.280266 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerDied","Data":"4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e"} Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.280280 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerDied","Data":"29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8"} Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.281779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"585208a7-186b-40da-a7af-be303777e77c","Type":"ContainerStarted","Data":"b64e8785dfef95508e56c17da6cc095bf7935164fe8821054cd246eaeccee998"} Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.346379 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.347536 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.350401 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.368847 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-scripts\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.368984 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-config-data\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.369015 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.369040 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5tgj\" (UniqueName: \"kubernetes.io/projected/6b792e18-e8f2-4f95-ad78-a9657e30d651-kube-api-access-q5tgj\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.369076 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.424391 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.426367 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.429029 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.434746 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.449721 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.450981 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.460288 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.470207 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.471190 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-scripts\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.471283 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gzht\" (UniqueName: \"kubernetes.io/projected/cc473f47-75ab-4666-a51e-14a7222c2383-kube-api-access-7gzht\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.472024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.472233 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.472282 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-config-data\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.472355 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.472411 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5tgj\" (UniqueName: \"kubernetes.io/projected/6b792e18-e8f2-4f95-ad78-a9657e30d651-kube-api-access-q5tgj\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.480547 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-config-data\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc 
kubenswrapper[4884]: I1128 15:43:28.484638 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.496911 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-scripts\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.503255 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5tgj\" (UniqueName: \"kubernetes.io/projected/6b792e18-e8f2-4f95-ad78-a9657e30d651-kube-api-access-q5tgj\") pod \"nova-cell0-cell-mapping-chfqr\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.517495 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.536463 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.544381 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.552972 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.574263 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.577609 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.577783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.577870 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gb4b\" (UniqueName: \"kubernetes.io/projected/c7721626-591f-4801-a8bd-0599b7a9bc80-kube-api-access-7gb4b\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.577944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gzht\" (UniqueName: \"kubernetes.io/projected/cc473f47-75ab-4666-a51e-14a7222c2383-kube-api-access-7gzht\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 
15:43:28.578027 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578129 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-config-data\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578343 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578397 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4bgz\" (UniqueName: \"kubernetes.io/projected/f52b4235-2446-4735-afa6-83e6e4e57da1-kube-api-access-s4bgz\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578452 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvs97\" (UniqueName: \"kubernetes.io/projected/254b1391-c090-4e1d-93cb-8cac64d7515d-kube-api-access-fvs97\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578479 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-config-data\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254b1391-c090-4e1d-93cb-8cac64d7515d-logs\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578520 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-config-data\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578533 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.578639 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/c7721626-591f-4801-a8bd-0599b7a9bc80-logs\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.605062 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.623714 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.624714 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gzht\" (UniqueName: \"kubernetes.io/projected/cc473f47-75ab-4666-a51e-14a7222c2383-kube-api-access-7gzht\") pod \"nova-cell1-novncproxy-0\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.668502 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683119 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvs97\" (UniqueName: \"kubernetes.io/projected/254b1391-c090-4e1d-93cb-8cac64d7515d-kube-api-access-fvs97\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-config-data\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683181 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254b1391-c090-4e1d-93cb-8cac64d7515d-logs\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683196 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683211 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-config-data\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683253 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7721626-591f-4801-a8bd-0599b7a9bc80-logs\") pod \"nova-api-0\" (UID: 
\"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683281 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683309 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gb4b\" (UniqueName: \"kubernetes.io/projected/c7721626-591f-4801-a8bd-0599b7a9bc80-kube-api-access-7gb4b\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683366 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-config-data\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.683411 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4bgz\" (UniqueName: \"kubernetes.io/projected/f52b4235-2446-4735-afa6-83e6e4e57da1-kube-api-access-s4bgz\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.684444 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7721626-591f-4801-a8bd-0599b7a9bc80-logs\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.684717 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254b1391-c090-4e1d-93cb-8cac64d7515d-logs\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.695568 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-config-data\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.696774 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-config-data\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.710665 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gb4b\" (UniqueName: \"kubernetes.io/projected/c7721626-591f-4801-a8bd-0599b7a9bc80-kube-api-access-7gb4b\") pod \"nova-api-0\" (UID: 
\"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.711707 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-config-data\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.713590 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvs97\" (UniqueName: \"kubernetes.io/projected/254b1391-c090-4e1d-93cb-8cac64d7515d-kube-api-access-fvs97\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.718512 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.720694 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " pod="openstack/nova-metadata-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.724672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") " pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.737806 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4bgz\" (UniqueName: \"kubernetes.io/projected/f52b4235-2446-4735-afa6-83e6e4e57da1-kube-api-access-s4bgz\") pod \"nova-scheduler-0\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") " pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.753028 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.765815 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfe29bb1-1749-44e8-8a9a-a44a8e85e95c" path="/var/lib/kubelet/pods/bfe29bb1-1749-44e8-8a9a-a44a8e85e95c/volumes" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.768345 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-89mq2"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.770373 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-89mq2"] Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.778384 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.778389 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.785687 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-config\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.785724 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.785798 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27v27\" (UniqueName: \"kubernetes.io/projected/9dde288e-b169-42f7-af64-647eef580099-kube-api-access-27v27\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.785825 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-svc\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.796429 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.796464 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.898637 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-config\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.898688 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.898714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27v27\" (UniqueName: \"kubernetes.io/projected/9dde288e-b169-42f7-af64-647eef580099-kube-api-access-27v27\") pod 
\"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.898740 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-svc\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.898781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.898798 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.900074 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.900173 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-svc\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.900223 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.900286 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.901547 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-config\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.917768 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27v27\" (UniqueName: \"kubernetes.io/projected/9dde288e-b169-42f7-af64-647eef580099-kube-api-access-27v27\") pod \"dnsmasq-dns-865f5d856f-89mq2\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " 
pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:28 crc kubenswrapper[4884]: I1128 15:43:28.980685 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.126891 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.205506 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-chfqr"] Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.410332 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-chfqr" event={"ID":"6b792e18-e8f2-4f95-ad78-a9657e30d651","Type":"ContainerStarted","Data":"6b2df877f9face4e9283832dc34c8cb75468f5d2c5c811d0febb28fd0aa29fc5"} Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.637984 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.648297 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zssbq"] Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.649569 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.653027 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.653312 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.660691 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zssbq"] Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.717414 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.753606 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-config-data\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.753727 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zrjr\" (UniqueName: \"kubernetes.io/projected/bba819eb-5c1d-410b-9a6e-59c00da11771-kube-api-access-2zrjr\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.753766 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-scripts\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.753854 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.828262 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.855797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-config-data\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.855883 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zrjr\" (UniqueName: \"kubernetes.io/projected/bba819eb-5c1d-410b-9a6e-59c00da11771-kube-api-access-2zrjr\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.855919 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-scripts\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.855986 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.863021 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-config-data\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.863682 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-scripts\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.864722 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.876364 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zrjr\" (UniqueName: \"kubernetes.io/projected/bba819eb-5c1d-410b-9a6e-59c00da11771-kube-api-access-2zrjr\") pod \"nova-cell1-conductor-db-sync-zssbq\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 
15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.980736 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:29 crc kubenswrapper[4884]: I1128 15:43:29.995047 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-89mq2"] Nov 28 15:43:30 crc kubenswrapper[4884]: W1128 15:43:30.006173 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9dde288e_b169_42f7_af64_647eef580099.slice/crio-b5e267bebc1a1476f792661862fac9d529759b08249ab34187709ef405116c75 WatchSource:0}: Error finding container b5e267bebc1a1476f792661862fac9d529759b08249ab34187709ef405116c75: Status 404 returned error can't find the container with id b5e267bebc1a1476f792661862fac9d529759b08249ab34187709ef405116c75 Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.144950 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.242282 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367173 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-run-httpd\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367231 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-sg-core-conf-yaml\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367286 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-scripts\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367324 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-log-httpd\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367398 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-combined-ca-bundle\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367499 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-config-data\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367560 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod 
"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367589 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zwxf\" (UniqueName: \"kubernetes.io/projected/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-kube-api-access-2zwxf\") pod \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\" (UID: \"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a\") " Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.367779 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.368118 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.368136 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.376256 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-scripts" (OuterVolumeSpecName: "scripts") pod "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.382235 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-kube-api-access-2zwxf" (OuterVolumeSpecName: "kube-api-access-2zwxf") pod "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "kube-api-access-2zwxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.402170 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.435221 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" event={"ID":"9dde288e-b169-42f7-af64-647eef580099","Type":"ContainerStarted","Data":"b5e267bebc1a1476f792661862fac9d529759b08249ab34187709ef405116c75"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.443128 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"585208a7-186b-40da-a7af-be303777e77c","Type":"ContainerStarted","Data":"b42cabfe2b0f40870c93167cc022af22181c4c8400a649d0268e8625b2010d96"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.443218 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.445545 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52b4235-2446-4735-afa6-83e6e4e57da1","Type":"ContainerStarted","Data":"31a82e3b8c0336d832fdc9d917735949acb98090ab0c3fd14f9ebea294494f44"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.452286 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerID="49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6" exitCode=0 Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.452336 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerDied","Data":"49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.452359 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8fe7b16-7b70-4153-85b1-acbbfbb9f30a","Type":"ContainerDied","Data":"20080fbf6f161097292f8e6bb9cbd71547033b8ef4b5600b40a96d6737ab1d8c"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.452374 4884 scope.go:117] "RemoveContainer" containerID="403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.452484 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.455882 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-chfqr" event={"ID":"6b792e18-e8f2-4f95-ad78-a9657e30d651","Type":"ContainerStarted","Data":"7c3ae662b3a86550433fd2c1c5a7ca2abc72a8f259452abb679aab5d0f1f8da2"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.460563 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254b1391-c090-4e1d-93cb-8cac64d7515d","Type":"ContainerStarted","Data":"389a6777ad171bcd1719308e2c8cccd8ef08862a3433c641460dd2f46202979a"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.467070 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cc473f47-75ab-4666-a51e-14a7222c2383","Type":"ContainerStarted","Data":"e1d729e399f243ead864801c6198a28c928f09155449e3776c6bfe31dbca12f7"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.472511 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zwxf\" (UniqueName: \"kubernetes.io/projected/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-kube-api-access-2zwxf\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.472540 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.472549 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.472732 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.591802255 podStartE2EDuration="3.472709244s" podCreationTimestamp="2025-11-28 15:43:27 +0000 UTC" firstStartedPulling="2025-11-28 15:43:28.133399588 +0000 UTC m=+1447.696183389" lastFinishedPulling="2025-11-28 15:43:29.014306577 +0000 UTC m=+1448.577090378" observedRunningTime="2025-11-28 15:43:30.467448217 +0000 UTC m=+1450.030232018" watchObservedRunningTime="2025-11-28 15:43:30.472709244 +0000 UTC m=+1450.035493045" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.473115 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7721626-591f-4801-a8bd-0599b7a9bc80","Type":"ContainerStarted","Data":"185ebc782e52f756b5e076a40d9ac489d10c3708b8149f4bd091bdc7a5e5964e"} Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.488279 4884 scope.go:117] "RemoveContainer" containerID="4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.493962 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-chfqr" podStartSLOduration=2.493943296 podStartE2EDuration="2.493943296s" podCreationTimestamp="2025-11-28 15:43:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:30.481083666 +0000 UTC m=+1450.043867467" watchObservedRunningTime="2025-11-28 15:43:30.493943296 +0000 UTC m=+1450.056727097" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.504853 4884 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.520340 4884 scope.go:117] "RemoveContainer" containerID="49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.524443 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zssbq"] Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.541004 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-config-data" (OuterVolumeSpecName: "config-data") pod "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" (UID: "a8fe7b16-7b70-4153-85b1-acbbfbb9f30a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.558334 4884 scope.go:117] "RemoveContainer" containerID="29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8" Nov 28 15:43:30 crc kubenswrapper[4884]: W1128 15:43:30.564423 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbba819eb_5c1d_410b_9a6e_59c00da11771.slice/crio-0ba66837f4fc653cc34536370ae9f4c3de2964d5b6858c6f112cf473f54b9b66 WatchSource:0}: Error finding container 0ba66837f4fc653cc34536370ae9f4c3de2964d5b6858c6f112cf473f54b9b66: Status 404 returned error can't find the container with id 0ba66837f4fc653cc34536370ae9f4c3de2964d5b6858c6f112cf473f54b9b66 Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.574130 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.574157 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.580833 4884 scope.go:117] "RemoveContainer" containerID="403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.581754 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55\": container with ID starting with 403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55 not found: ID does not exist" containerID="403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.581782 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55"} err="failed to get container status \"403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55\": rpc error: code = NotFound desc = could not find container \"403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55\": container with ID starting with 
403f170607890471def029adf234b46a812b18fadffcf9f6ee37c81680730a55 not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.581800 4884 scope.go:117] "RemoveContainer" containerID="4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.581956 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e\": container with ID starting with 4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e not found: ID does not exist" containerID="4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.581976 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e"} err="failed to get container status \"4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e\": rpc error: code = NotFound desc = could not find container \"4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e\": container with ID starting with 4694f36c09754493a6c6e78d09eee1215064f4d1cdd23273392692367911899e not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.581988 4884 scope.go:117] "RemoveContainer" containerID="49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.582230 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6\": container with ID starting with 49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6 not found: ID does not exist" containerID="49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.582249 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6"} err="failed to get container status \"49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6\": rpc error: code = NotFound desc = could not find container \"49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6\": container with ID starting with 49d8db2e919a92c764f64d95699a9e0ab00ca1edebfd4e81616b6ecfcbc04dd6 not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.582260 4884 scope.go:117] "RemoveContainer" containerID="29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.582424 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8\": container with ID starting with 29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8 not found: ID does not exist" containerID="29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.582441 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8"} err="failed to get container status \"29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8\": rpc 
error: code = NotFound desc = could not find container \"29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8\": container with ID starting with 29bf163c0ec46df45874f29337cb5b2dfcfaf1eba1780b9c16f027f50d6fc0c8 not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.847398 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.862765 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871161 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.871589 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="sg-core" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871603 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="sg-core" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.871626 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="proxy-httpd" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871633 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="proxy-httpd" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.871647 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-notification-agent" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871654 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-notification-agent" Nov 28 15:43:30 crc kubenswrapper[4884]: E1128 15:43:30.871669 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-central-agent" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871675 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-central-agent" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871844 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-notification-agent" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871859 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="ceilometer-central-agent" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871875 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="proxy-httpd" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.871888 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" containerName="sg-core" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.873566 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.876679 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.876867 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.878191 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.882864 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.980983 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-config-data\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981031 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-run-httpd\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r85dl\" (UniqueName: \"kubernetes.io/projected/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-kube-api-access-r85dl\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981348 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-log-httpd\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981404 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-scripts\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:30 crc kubenswrapper[4884]: I1128 15:43:30.981612 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-config-data\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083210 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-run-httpd\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083280 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r85dl\" (UniqueName: \"kubernetes.io/projected/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-kube-api-access-r85dl\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-log-httpd\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083340 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-scripts\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083381 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.083423 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.084752 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-run-httpd\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.086934 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-log-httpd\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.088361 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-scripts\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.088357 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.091222 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.091307 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-config-data\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.101700 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r85dl\" (UniqueName: \"kubernetes.io/projected/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-kube-api-access-r85dl\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.107047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.255926 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.521442 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zssbq" event={"ID":"bba819eb-5c1d-410b-9a6e-59c00da11771","Type":"ContainerStarted","Data":"d3080a89a34206a9653107776ef014eae527615ab0f6f525bebc4116890adea8"} Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.521791 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zssbq" event={"ID":"bba819eb-5c1d-410b-9a6e-59c00da11771","Type":"ContainerStarted","Data":"0ba66837f4fc653cc34536370ae9f4c3de2964d5b6858c6f112cf473f54b9b66"} Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.544779 4884 generic.go:334] "Generic (PLEG): container finished" podID="9dde288e-b169-42f7-af64-647eef580099" containerID="3959288d20e50b1d754a23189e9cef53aa5c34cf9ec15292dbfa2b3606c62d76" exitCode=0 Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.544847 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" event={"ID":"9dde288e-b169-42f7-af64-647eef580099","Type":"ContainerDied","Data":"3959288d20e50b1d754a23189e9cef53aa5c34cf9ec15292dbfa2b3606c62d76"} Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.551910 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-zssbq" podStartSLOduration=2.5518966990000003 podStartE2EDuration="2.551896699s" podCreationTimestamp="2025-11-28 15:43:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:31.551460348 +0000 UTC m=+1451.114244159" watchObservedRunningTime="2025-11-28 15:43:31.551896699 +0000 UTC m=+1451.114680500" Nov 28 15:43:31 crc kubenswrapper[4884]: I1128 15:43:31.871433 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:43:32 crc kubenswrapper[4884]: I1128 15:43:32.599594 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" event={"ID":"9dde288e-b169-42f7-af64-647eef580099","Type":"ContainerStarted","Data":"30f8a4d90aad4194121e096b6c45c4a7746bc54c18f144f7f144a9493b003d9e"} Nov 28 15:43:32 crc kubenswrapper[4884]: I1128 15:43:32.642214 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" podStartSLOduration=4.642196161 podStartE2EDuration="4.642196161s" podCreationTimestamp="2025-11-28 15:43:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:32.622527096 +0000 UTC m=+1452.185310897" watchObservedRunningTime="2025-11-28 15:43:32.642196161 +0000 UTC m=+1452.204979962" Nov 28 15:43:32 crc kubenswrapper[4884]: I1128 15:43:32.702787 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8fe7b16-7b70-4153-85b1-acbbfbb9f30a" path="/var/lib/kubelet/pods/a8fe7b16-7b70-4153-85b1-acbbfbb9f30a/volumes" Nov 28 15:43:33 crc kubenswrapper[4884]: I1128 15:43:33.200975 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:33 crc kubenswrapper[4884]: I1128 15:43:33.216899 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:43:33 crc kubenswrapper[4884]: W1128 15:43:33.603371 4884 manager.go:1169] Failed to process watch event 
{EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78e80f6b_bdb1_4fd7_9a51_7bc7b71086c2.slice/crio-1e39c8ec3f6e545e451da5047087ddefaccf509123f49654450c7fb5dbf4fccb WatchSource:0}: Error finding container 1e39c8ec3f6e545e451da5047087ddefaccf509123f49654450c7fb5dbf4fccb: Status 404 returned error can't find the container with id 1e39c8ec3f6e545e451da5047087ddefaccf509123f49654450c7fb5dbf4fccb Nov 28 15:43:33 crc kubenswrapper[4884]: I1128 15:43:33.623315 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.632389 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerStarted","Data":"1e39c8ec3f6e545e451da5047087ddefaccf509123f49654450c7fb5dbf4fccb"} Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.634893 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52b4235-2446-4735-afa6-83e6e4e57da1","Type":"ContainerStarted","Data":"329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661"} Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.637934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254b1391-c090-4e1d-93cb-8cac64d7515d","Type":"ContainerStarted","Data":"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b"} Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.640442 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cc473f47-75ab-4666-a51e-14a7222c2383","Type":"ContainerStarted","Data":"423fc717ca99c44555115e638f9599ae77f0b956b2f33212fa986e4d737e108b"} Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.640568 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="cc473f47-75ab-4666-a51e-14a7222c2383" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://423fc717ca99c44555115e638f9599ae77f0b956b2f33212fa986e4d737e108b" gracePeriod=30 Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.654935 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7721626-591f-4801-a8bd-0599b7a9bc80","Type":"ContainerStarted","Data":"f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379"} Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.667876 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.564500659 podStartE2EDuration="6.667857298s" podCreationTimestamp="2025-11-28 15:43:28 +0000 UTC" firstStartedPulling="2025-11-28 15:43:29.735066532 +0000 UTC m=+1449.297850333" lastFinishedPulling="2025-11-28 15:43:33.838423171 +0000 UTC m=+1453.401206972" observedRunningTime="2025-11-28 15:43:34.655504319 +0000 UTC m=+1454.218288120" watchObservedRunningTime="2025-11-28 15:43:34.667857298 +0000 UTC m=+1454.230641089" Nov 28 15:43:34 crc kubenswrapper[4884]: I1128 15:43:34.695583 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.672291071 podStartE2EDuration="6.695561357s" podCreationTimestamp="2025-11-28 15:43:28 +0000 UTC" firstStartedPulling="2025-11-28 15:43:29.838472657 +0000 UTC m=+1449.401256458" lastFinishedPulling="2025-11-28 15:43:33.861742943 +0000 UTC 
m=+1453.424526744" observedRunningTime="2025-11-28 15:43:34.677218364 +0000 UTC m=+1454.240002215" watchObservedRunningTime="2025-11-28 15:43:34.695561357 +0000 UTC m=+1454.258345158" Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.667387 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7721626-591f-4801-a8bd-0599b7a9bc80","Type":"ContainerStarted","Data":"83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af"} Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.673776 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerStarted","Data":"a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd"} Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.673813 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerStarted","Data":"0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9"} Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.678749 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-log" containerID="cri-o://b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b" gracePeriod=30 Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.678990 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254b1391-c090-4e1d-93cb-8cac64d7515d","Type":"ContainerStarted","Data":"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6"} Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.679044 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-metadata" containerID="cri-o://71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6" gracePeriod=30 Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.687642 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.988985247 podStartE2EDuration="7.687628528s" podCreationTimestamp="2025-11-28 15:43:28 +0000 UTC" firstStartedPulling="2025-11-28 15:43:30.170163932 +0000 UTC m=+1449.732947723" lastFinishedPulling="2025-11-28 15:43:33.868807203 +0000 UTC m=+1453.431591004" observedRunningTime="2025-11-28 15:43:35.685398605 +0000 UTC m=+1455.248182406" watchObservedRunningTime="2025-11-28 15:43:35.687628528 +0000 UTC m=+1455.250412329" Nov 28 15:43:35 crc kubenswrapper[4884]: I1128 15:43:35.722862 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.516441902 podStartE2EDuration="7.722840478s" podCreationTimestamp="2025-11-28 15:43:28 +0000 UTC" firstStartedPulling="2025-11-28 15:43:29.653962354 +0000 UTC m=+1449.216746155" lastFinishedPulling="2025-11-28 15:43:33.86036093 +0000 UTC m=+1453.423144731" observedRunningTime="2025-11-28 15:43:35.701868902 +0000 UTC m=+1455.264652703" watchObservedRunningTime="2025-11-28 15:43:35.722840478 +0000 UTC m=+1455.285624279" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.324420 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.414660 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254b1391-c090-4e1d-93cb-8cac64d7515d-logs\") pod \"254b1391-c090-4e1d-93cb-8cac64d7515d\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.414984 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-config-data\") pod \"254b1391-c090-4e1d-93cb-8cac64d7515d\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.415123 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvs97\" (UniqueName: \"kubernetes.io/projected/254b1391-c090-4e1d-93cb-8cac64d7515d-kube-api-access-fvs97\") pod \"254b1391-c090-4e1d-93cb-8cac64d7515d\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.415205 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-combined-ca-bundle\") pod \"254b1391-c090-4e1d-93cb-8cac64d7515d\" (UID: \"254b1391-c090-4e1d-93cb-8cac64d7515d\") " Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.417406 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/254b1391-c090-4e1d-93cb-8cac64d7515d-logs" (OuterVolumeSpecName: "logs") pod "254b1391-c090-4e1d-93cb-8cac64d7515d" (UID: "254b1391-c090-4e1d-93cb-8cac64d7515d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.422806 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/254b1391-c090-4e1d-93cb-8cac64d7515d-kube-api-access-fvs97" (OuterVolumeSpecName: "kube-api-access-fvs97") pod "254b1391-c090-4e1d-93cb-8cac64d7515d" (UID: "254b1391-c090-4e1d-93cb-8cac64d7515d"). InnerVolumeSpecName "kube-api-access-fvs97". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.441430 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "254b1391-c090-4e1d-93cb-8cac64d7515d" (UID: "254b1391-c090-4e1d-93cb-8cac64d7515d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.468250 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-config-data" (OuterVolumeSpecName: "config-data") pod "254b1391-c090-4e1d-93cb-8cac64d7515d" (UID: "254b1391-c090-4e1d-93cb-8cac64d7515d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.517579 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.517827 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvs97\" (UniqueName: \"kubernetes.io/projected/254b1391-c090-4e1d-93cb-8cac64d7515d-kube-api-access-fvs97\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.517921 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254b1391-c090-4e1d-93cb-8cac64d7515d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.518005 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/254b1391-c090-4e1d-93cb-8cac64d7515d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.691140 4884 generic.go:334] "Generic (PLEG): container finished" podID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerID="71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6" exitCode=0 Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.691171 4884 generic.go:334] "Generic (PLEG): container finished" podID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerID="b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b" exitCode=143 Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.691229 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.706994 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerStarted","Data":"9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a"} Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.707362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254b1391-c090-4e1d-93cb-8cac64d7515d","Type":"ContainerDied","Data":"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6"} Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.707666 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254b1391-c090-4e1d-93cb-8cac64d7515d","Type":"ContainerDied","Data":"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b"} Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.708131 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"254b1391-c090-4e1d-93cb-8cac64d7515d","Type":"ContainerDied","Data":"389a6777ad171bcd1719308e2c8cccd8ef08862a3433c641460dd2f46202979a"} Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.707764 4884 scope.go:117] "RemoveContainer" containerID="71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.757487 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.766161 4884 scope.go:117] "RemoveContainer" containerID="b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b" Nov 28 15:43:36 crc kubenswrapper[4884]: 
I1128 15:43:36.772901 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.786002 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:36 crc kubenswrapper[4884]: E1128 15:43:36.786479 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-metadata" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.786500 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-metadata" Nov 28 15:43:36 crc kubenswrapper[4884]: E1128 15:43:36.786524 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-log" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.786533 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-log" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.786746 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-log" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.786774 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" containerName="nova-metadata-metadata" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.787928 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.789829 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.790030 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.797409 4884 scope.go:117] "RemoveContainer" containerID="71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.797633 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:36 crc kubenswrapper[4884]: E1128 15:43:36.798015 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6\": container with ID starting with 71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6 not found: ID does not exist" containerID="71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.798055 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6"} err="failed to get container status \"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6\": rpc error: code = NotFound desc = could not find container \"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6\": container with ID starting with 71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6 not found: ID does not exist" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.798101 4884 scope.go:117] "RemoveContainer" 
containerID="b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b" Nov 28 15:43:36 crc kubenswrapper[4884]: E1128 15:43:36.801412 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b\": container with ID starting with b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b not found: ID does not exist" containerID="b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.801465 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b"} err="failed to get container status \"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b\": rpc error: code = NotFound desc = could not find container \"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b\": container with ID starting with b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b not found: ID does not exist" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.801489 4884 scope.go:117] "RemoveContainer" containerID="71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.802810 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6"} err="failed to get container status \"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6\": rpc error: code = NotFound desc = could not find container \"71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6\": container with ID starting with 71c46d9cea2da52eeff6a2179a2f4c4bd8345e4c4424e61574283d10e0df24f6 not found: ID does not exist" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.802862 4884 scope.go:117] "RemoveContainer" containerID="b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.803168 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b"} err="failed to get container status \"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b\": rpc error: code = NotFound desc = could not find container \"b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b\": container with ID starting with b6c5e8d551c8b304ce650b4cc37c99699a8d04560f89c05f43912221c836340b not found: ID does not exist" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.925440 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-config-data\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.925502 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.925703 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5jnj\" (UniqueName: \"kubernetes.io/projected/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-kube-api-access-s5jnj\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.925825 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-logs\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:36 crc kubenswrapper[4884]: I1128 15:43:36.926021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.028248 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-logs\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.028350 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.028414 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-config-data\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.028450 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.028522 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5jnj\" (UniqueName: \"kubernetes.io/projected/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-kube-api-access-s5jnj\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.028960 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-logs\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.034344 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " 
pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.034442 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.046888 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-config-data\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.052113 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5jnj\" (UniqueName: \"kubernetes.io/projected/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-kube-api-access-s5jnj\") pod \"nova-metadata-0\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") " pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.153308 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.652841 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.661043 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.717801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76","Type":"ContainerStarted","Data":"0d9102a0e2f4b548b1e45de3ecedbe8737432c8a61a16661c1c474507f06a404"} Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.720399 4884 generic.go:334] "Generic (PLEG): container finished" podID="6b792e18-e8f2-4f95-ad78-a9657e30d651" containerID="7c3ae662b3a86550433fd2c1c5a7ca2abc72a8f259452abb679aab5d0f1f8da2" exitCode=0 Nov 28 15:43:37 crc kubenswrapper[4884]: I1128 15:43:37.720488 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-chfqr" event={"ID":"6b792e18-e8f2-4f95-ad78-a9657e30d651","Type":"ContainerDied","Data":"7c3ae662b3a86550433fd2c1c5a7ca2abc72a8f259452abb679aab5d0f1f8da2"} Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.671560 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.712059 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="254b1391-c090-4e1d-93cb-8cac64d7515d" path="/var/lib/kubelet/pods/254b1391-c090-4e1d-93cb-8cac64d7515d/volumes" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.738049 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerStarted","Data":"90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73"} Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.738687 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.743661 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76","Type":"ContainerStarted","Data":"32ba7e16f2dea6c4249c539765a6254e245e09abcbbe10c696d6b4f63e80584d"} Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.743699 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76","Type":"ContainerStarted","Data":"356c6a181f8baa4e4f7bc613da8e659e49bfc056bb3d7d30f338ceda3391c030"} Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.756339 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.759884 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.762314 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.734396514 podStartE2EDuration="8.762297171s" podCreationTimestamp="2025-11-28 15:43:30 +0000 UTC" firstStartedPulling="2025-11-28 15:43:33.810465716 +0000 UTC m=+1453.373249517" lastFinishedPulling="2025-11-28 15:43:37.838366373 +0000 UTC m=+1457.401150174" observedRunningTime="2025-11-28 15:43:38.759794121 +0000 UTC m=+1458.322577932" watchObservedRunningTime="2025-11-28 15:43:38.762297171 +0000 UTC m=+1458.325080972" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.783194 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.783569 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.797226 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.797201014 podStartE2EDuration="2.797201014s" podCreationTimestamp="2025-11-28 15:43:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:38.787571461 +0000 UTC m=+1458.350355272" watchObservedRunningTime="2025-11-28 15:43:38.797201014 +0000 UTC m=+1458.359984835" Nov 28 15:43:38 crc kubenswrapper[4884]: I1128 15:43:38.842885 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.130352 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.190219 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-466lz"] Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.190574 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerName="dnsmasq-dns" containerID="cri-o://4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527" gracePeriod=10 Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.382936 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.483594 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5tgj\" (UniqueName: \"kubernetes.io/projected/6b792e18-e8f2-4f95-ad78-a9657e30d651-kube-api-access-q5tgj\") pod \"6b792e18-e8f2-4f95-ad78-a9657e30d651\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.483766 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-combined-ca-bundle\") pod \"6b792e18-e8f2-4f95-ad78-a9657e30d651\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.483893 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-scripts\") pod \"6b792e18-e8f2-4f95-ad78-a9657e30d651\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.483961 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-config-data\") pod \"6b792e18-e8f2-4f95-ad78-a9657e30d651\" (UID: \"6b792e18-e8f2-4f95-ad78-a9657e30d651\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.489629 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-scripts" (OuterVolumeSpecName: "scripts") pod "6b792e18-e8f2-4f95-ad78-a9657e30d651" (UID: "6b792e18-e8f2-4f95-ad78-a9657e30d651"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.495286 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b792e18-e8f2-4f95-ad78-a9657e30d651-kube-api-access-q5tgj" (OuterVolumeSpecName: "kube-api-access-q5tgj") pod "6b792e18-e8f2-4f95-ad78-a9657e30d651" (UID: "6b792e18-e8f2-4f95-ad78-a9657e30d651"). InnerVolumeSpecName "kube-api-access-q5tgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.531690 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b792e18-e8f2-4f95-ad78-a9657e30d651" (UID: "6b792e18-e8f2-4f95-ad78-a9657e30d651"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.531721 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-config-data" (OuterVolumeSpecName: "config-data") pod "6b792e18-e8f2-4f95-ad78-a9657e30d651" (UID: "6b792e18-e8f2-4f95-ad78-a9657e30d651"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.589274 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.589306 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.589316 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5tgj\" (UniqueName: \"kubernetes.io/projected/6b792e18-e8f2-4f95-ad78-a9657e30d651-kube-api-access-q5tgj\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.589327 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b792e18-e8f2-4f95-ad78-a9657e30d651-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.658924 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.753753 4884 generic.go:334] "Generic (PLEG): container finished" podID="bba819eb-5c1d-410b-9a6e-59c00da11771" containerID="d3080a89a34206a9653107776ef014eae527615ab0f6f525bebc4116890adea8" exitCode=0 Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.753817 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zssbq" event={"ID":"bba819eb-5c1d-410b-9a6e-59c00da11771","Type":"ContainerDied","Data":"d3080a89a34206a9653107776ef014eae527615ab0f6f525bebc4116890adea8"} Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.756408 4884 generic.go:334] "Generic (PLEG): container finished" podID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerID="4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527" exitCode=0 Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.756512 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.756887 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" event={"ID":"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6","Type":"ContainerDied","Data":"4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527"} Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.756914 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-466lz" event={"ID":"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6","Type":"ContainerDied","Data":"1b44f7f3ca48e58364c0b940d876de1176ebd3892a99eec202bf5df7e33a5c6f"} Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.756929 4884 scope.go:117] "RemoveContainer" containerID="4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.759825 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-chfqr" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.761959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-chfqr" event={"ID":"6b792e18-e8f2-4f95-ad78-a9657e30d651","Type":"ContainerDied","Data":"6b2df877f9face4e9283832dc34c8cb75468f5d2c5c811d0febb28fd0aa29fc5"} Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.761994 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b2df877f9face4e9283832dc34c8cb75468f5d2c5c811d0febb28fd0aa29fc5" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.792649 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-config\") pod \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.792987 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-svc\") pod \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.793111 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-sb\") pod \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.793176 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5plpr\" (UniqueName: \"kubernetes.io/projected/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-kube-api-access-5plpr\") pod \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.793198 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-nb\") pod \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.793222 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-swift-storage-0\") pod \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\" (UID: \"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6\") " Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.793236 4884 scope.go:117] "RemoveContainer" containerID="45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.804617 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.808730 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-kube-api-access-5plpr" (OuterVolumeSpecName: "kube-api-access-5plpr") pod "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" (UID: "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6"). InnerVolumeSpecName "kube-api-access-5plpr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.822756 4884 scope.go:117] "RemoveContainer" containerID="4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527" Nov 28 15:43:39 crc kubenswrapper[4884]: E1128 15:43:39.823725 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527\": container with ID starting with 4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527 not found: ID does not exist" containerID="4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.823777 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527"} err="failed to get container status \"4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527\": rpc error: code = NotFound desc = could not find container \"4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527\": container with ID starting with 4a3548b78fa0d63a96120b9bc417426d1f826521b7b12dde9d99180760e01527 not found: ID does not exist" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.823809 4884 scope.go:117] "RemoveContainer" containerID="45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637" Nov 28 15:43:39 crc kubenswrapper[4884]: E1128 15:43:39.824810 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637\": container with ID starting with 45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637 not found: ID does not exist" containerID="45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.824841 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637"} err="failed to get container status \"45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637\": rpc error: code = NotFound desc = could not find container \"45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637\": container with ID starting with 45b026334abcb0fc1aea329fe0127489fed2e5ab981535ed87f9e110c4c05637 not found: ID does not exist" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.842784 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.843127 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.870043 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" (UID: 
"a9242cf5-22c8-48a9-9d72-6027ba8f5fa6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.871477 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" (UID: "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.873045 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" (UID: "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.875572 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" (UID: "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.882079 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-config" (OuterVolumeSpecName: "config") pod "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" (UID: "a9242cf5-22c8-48a9-9d72-6027ba8f5fa6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.896821 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.896850 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.896860 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.896873 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5plpr\" (UniqueName: \"kubernetes.io/projected/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-kube-api-access-5plpr\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.896883 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.896894 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.938266 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:43:39 crc kubenswrapper[4884]: I1128 15:43:39.951400 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.093249 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-466lz"] Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.109384 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-466lz"] Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.282337 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.697567 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" path="/var/lib/kubelet/pods/a9242cf5-22c8-48a9-9d72-6027ba8f5fa6/volumes" Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.770387 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-log" containerID="cri-o://f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379" gracePeriod=30 Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.771533 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-log" containerID="cri-o://356c6a181f8baa4e4f7bc613da8e659e49bfc056bb3d7d30f338ceda3391c030" gracePeriod=30 Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.772761 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" 
podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-api" containerID="cri-o://83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af" gracePeriod=30 Nov 28 15:43:40 crc kubenswrapper[4884]: I1128 15:43:40.774961 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-metadata" containerID="cri-o://32ba7e16f2dea6c4249c539765a6254e245e09abcbbe10c696d6b4f63e80584d" gracePeriod=30 Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.198903 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zssbq" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.330188 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zrjr\" (UniqueName: \"kubernetes.io/projected/bba819eb-5c1d-410b-9a6e-59c00da11771-kube-api-access-2zrjr\") pod \"bba819eb-5c1d-410b-9a6e-59c00da11771\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.330259 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-config-data\") pod \"bba819eb-5c1d-410b-9a6e-59c00da11771\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.330305 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle\") pod \"bba819eb-5c1d-410b-9a6e-59c00da11771\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.330432 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-scripts\") pod \"bba819eb-5c1d-410b-9a6e-59c00da11771\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") " Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.339628 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-scripts" (OuterVolumeSpecName: "scripts") pod "bba819eb-5c1d-410b-9a6e-59c00da11771" (UID: "bba819eb-5c1d-410b-9a6e-59c00da11771"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.345348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bba819eb-5c1d-410b-9a6e-59c00da11771-kube-api-access-2zrjr" (OuterVolumeSpecName: "kube-api-access-2zrjr") pod "bba819eb-5c1d-410b-9a6e-59c00da11771" (UID: "bba819eb-5c1d-410b-9a6e-59c00da11771"). InnerVolumeSpecName "kube-api-access-2zrjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:41 crc kubenswrapper[4884]: E1128 15:43:41.372428 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle podName:bba819eb-5c1d-410b-9a6e-59c00da11771 nodeName:}" failed. No retries permitted until 2025-11-28 15:43:41.872401542 +0000 UTC m=+1461.435185343 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle") pod "bba819eb-5c1d-410b-9a6e-59c00da11771" (UID: "bba819eb-5c1d-410b-9a6e-59c00da11771") : error deleting /var/lib/kubelet/pods/bba819eb-5c1d-410b-9a6e-59c00da11771/volume-subpaths: remove /var/lib/kubelet/pods/bba819eb-5c1d-410b-9a6e-59c00da11771/volume-subpaths: no such file or directory Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.378220 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-config-data" (OuterVolumeSpecName: "config-data") pod "bba819eb-5c1d-410b-9a6e-59c00da11771" (UID: "bba819eb-5c1d-410b-9a6e-59c00da11771"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.432747 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zrjr\" (UniqueName: \"kubernetes.io/projected/bba819eb-5c1d-410b-9a6e-59c00da11771-kube-api-access-2zrjr\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.432786 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.432796 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.785539 4884 generic.go:334] "Generic (PLEG): container finished" podID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerID="32ba7e16f2dea6c4249c539765a6254e245e09abcbbe10c696d6b4f63e80584d" exitCode=0 Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.786492 4884 generic.go:334] "Generic (PLEG): container finished" podID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerID="356c6a181f8baa4e4f7bc613da8e659e49bfc056bb3d7d30f338ceda3391c030" exitCode=143 Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.786770 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76","Type":"ContainerDied","Data":"32ba7e16f2dea6c4249c539765a6254e245e09abcbbe10c696d6b4f63e80584d"} Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.786880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76","Type":"ContainerDied","Data":"356c6a181f8baa4e4f7bc613da8e659e49bfc056bb3d7d30f338ceda3391c030"} Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.786963 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76","Type":"ContainerDied","Data":"0d9102a0e2f4b548b1e45de3ecedbe8737432c8a61a16661c1c474507f06a404"} Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.787049 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d9102a0e2f4b548b1e45de3ecedbe8737432c8a61a16661c1c474507f06a404" Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.797856 4884 generic.go:334] "Generic (PLEG): container finished" podID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerID="f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379" 
exitCode=143
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.797959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7721626-591f-4801-a8bd-0599b7a9bc80","Type":"ContainerDied","Data":"f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379"}
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.804228 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-zssbq" event={"ID":"bba819eb-5c1d-410b-9a6e-59c00da11771","Type":"ContainerDied","Data":"0ba66837f4fc653cc34536370ae9f4c3de2964d5b6858c6f112cf473f54b9b66"}
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.804314 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ba66837f4fc653cc34536370ae9f4c3de2964d5b6858c6f112cf473f54b9b66"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.804244 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-zssbq"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.804610 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f52b4235-2446-4735-afa6-83e6e4e57da1" containerName="nova-scheduler-scheduler" containerID="cri-o://329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661" gracePeriod=30
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.857158 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:43:41 crc kubenswrapper[4884]: E1128 15:43:41.864559 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerName="dnsmasq-dns"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.868223 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerName="dnsmasq-dns"
Nov 28 15:43:41 crc kubenswrapper[4884]: E1128 15:43:41.868369 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bba819eb-5c1d-410b-9a6e-59c00da11771" containerName="nova-cell1-conductor-db-sync"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.868446 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bba819eb-5c1d-410b-9a6e-59c00da11771" containerName="nova-cell1-conductor-db-sync"
Nov 28 15:43:41 crc kubenswrapper[4884]: E1128 15:43:41.868553 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b792e18-e8f2-4f95-ad78-a9657e30d651" containerName="nova-manage"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.868634 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b792e18-e8f2-4f95-ad78-a9657e30d651" containerName="nova-manage"
Nov 28 15:43:41 crc kubenswrapper[4884]: E1128 15:43:41.868726 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerName="init"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.868796 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerName="init"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.869199 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9242cf5-22c8-48a9-9d72-6027ba8f5fa6" containerName="dnsmasq-dns"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.869315 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b792e18-e8f2-4f95-ad78-a9657e30d651" containerName="nova-manage"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.869392 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bba819eb-5c1d-410b-9a6e-59c00da11771" containerName="nova-cell1-conductor-db-sync"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.870079 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.870238 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.876273 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952331 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-combined-ca-bundle\") pod \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") "
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952367 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5jnj\" (UniqueName: \"kubernetes.io/projected/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-kube-api-access-s5jnj\") pod \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") "
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952385 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-nova-metadata-tls-certs\") pod \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") "
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952478 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-config-data\") pod \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") "
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952508 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-logs\") pod \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\" (UID: \"41a05e91-6bd1-426a-b9d1-f89fb9ce3d76\") "
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952546 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle\") pod \"bba819eb-5c1d-410b-9a6e-59c00da11771\" (UID: \"bba819eb-5c1d-410b-9a6e-59c00da11771\") "
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952781 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952875 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.952944 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqz5v\" (UniqueName: \"kubernetes.io/projected/c1e2007d-c536-47f5-9d03-92069c96f654-kube-api-access-gqz5v\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.953380 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-logs" (OuterVolumeSpecName: "logs") pod "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" (UID: "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.957463 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bba819eb-5c1d-410b-9a6e-59c00da11771" (UID: "bba819eb-5c1d-410b-9a6e-59c00da11771"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.958923 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-kube-api-access-s5jnj" (OuterVolumeSpecName: "kube-api-access-s5jnj") pod "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" (UID: "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76"). InnerVolumeSpecName "kube-api-access-s5jnj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.980794 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-config-data" (OuterVolumeSpecName: "config-data") pod "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" (UID: "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:41 crc kubenswrapper[4884]: I1128 15:43:41.991999 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" (UID: "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.006224 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" (UID: "41a05e91-6bd1-426a-b9d1-f89fb9ce3d76"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.054798 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055074 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqz5v\" (UniqueName: \"kubernetes.io/projected/c1e2007d-c536-47f5-9d03-92069c96f654-kube-api-access-gqz5v\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055229 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055356 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055414 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5jnj\" (UniqueName: \"kubernetes.io/projected/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-kube-api-access-s5jnj\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055477 4884 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055541 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055595 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.055647 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bba819eb-5c1d-410b-9a6e-59c00da11771-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.059176 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.062905 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.070156 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqz5v\" (UniqueName: \"kubernetes.io/projected/c1e2007d-c536-47f5-9d03-92069c96f654-kube-api-access-gqz5v\") pod \"nova-cell1-conductor-0\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.195022 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.763743 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.814733 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.814824 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c1e2007d-c536-47f5-9d03-92069c96f654","Type":"ContainerStarted","Data":"1180b9da0455c776e4db49d03528dd64ad9d827c0a6a276293c6e32165d2e88e"}
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.928909 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.945280 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.976620 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:43:42 crc kubenswrapper[4884]: E1128 15:43:42.977168 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-metadata"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.977188 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-metadata"
Nov 28 15:43:42 crc kubenswrapper[4884]: E1128 15:43:42.977202 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-log"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.977210 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-log"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.977427 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-metadata"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.977449 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" containerName="nova-metadata-log"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.978473 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.981472 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.981801 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 28 15:43:42 crc kubenswrapper[4884]: I1128 15:43:42.995964 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.083068 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.083131 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l27tn\" (UniqueName: \"kubernetes.io/projected/6eb2f4f0-9527-4a35-a9c0-e32c14102412-kube-api-access-l27tn\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.083209 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.083249 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6eb2f4f0-9527-4a35-a9c0-e32c14102412-logs\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.083438 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-config-data\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.185628 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6eb2f4f0-9527-4a35-a9c0-e32c14102412-logs\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.185699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-config-data\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.185777 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l27tn\" (UniqueName: \"kubernetes.io/projected/6eb2f4f0-9527-4a35-a9c0-e32c14102412-kube-api-access-l27tn\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.185795 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.185849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.186556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6eb2f4f0-9527-4a35-a9c0-e32c14102412-logs\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.191157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.192013 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.192474 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-config-data\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.217463 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l27tn\" (UniqueName: \"kubernetes.io/projected/6eb2f4f0-9527-4a35-a9c0-e32c14102412-kube-api-access-l27tn\") pod \"nova-metadata-0\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.300690 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: W1128 15:43:43.743904 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6eb2f4f0_9527_4a35_a9c0_e32c14102412.slice/crio-c41d3346f6e36c5301057c18946867b508984a61e31997103864b18457ec0779 WatchSource:0}: Error finding container c41d3346f6e36c5301057c18946867b508984a61e31997103864b18457ec0779: Status 404 returned error can't find the container with id c41d3346f6e36c5301057c18946867b508984a61e31997103864b18457ec0779
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.745383 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:43:43 crc kubenswrapper[4884]: E1128 15:43:43.809188 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 15:43:43 crc kubenswrapper[4884]: E1128 15:43:43.810665 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 15:43:43 crc kubenswrapper[4884]: E1128 15:43:43.811965 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 15:43:43 crc kubenswrapper[4884]: E1128 15:43:43.812001 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f52b4235-2446-4735-afa6-83e6e4e57da1" containerName="nova-scheduler-scheduler"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.824674 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6eb2f4f0-9527-4a35-a9c0-e32c14102412","Type":"ContainerStarted","Data":"c41d3346f6e36c5301057c18946867b508984a61e31997103864b18457ec0779"}
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.826522 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c1e2007d-c536-47f5-9d03-92069c96f654","Type":"ContainerStarted","Data":"831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8"}
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.827813 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:43 crc kubenswrapper[4884]: I1128 15:43:43.855031 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.855015068 podStartE2EDuration="2.855015068s" podCreationTimestamp="2025-11-28 15:43:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:43.847132437 +0000 UTC m=+1463.409916258" watchObservedRunningTime="2025-11-28 15:43:43.855015068 +0000 UTC m=+1463.417798869"
Nov 28 15:43:44 crc kubenswrapper[4884]: I1128 15:43:44.701106 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41a05e91-6bd1-426a-b9d1-f89fb9ce3d76" path="/var/lib/kubelet/pods/41a05e91-6bd1-426a-b9d1-f89fb9ce3d76/volumes"
Nov 28 15:43:44 crc kubenswrapper[4884]: I1128 15:43:44.848558 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6eb2f4f0-9527-4a35-a9c0-e32c14102412","Type":"ContainerStarted","Data":"e3f5e31528ef3c065f96ad92bea080bc5996a8043131d95f1ee95074faa786e5"}
Nov 28 15:43:44 crc kubenswrapper[4884]: I1128 15:43:44.848653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6eb2f4f0-9527-4a35-a9c0-e32c14102412","Type":"ContainerStarted","Data":"63149cdb78b61ff8605f380c5ac1f511b919aa21dd5606a1425d16d16f5cff95"}
Nov 28 15:43:44 crc kubenswrapper[4884]: I1128 15:43:44.891392 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.891370918 podStartE2EDuration="2.891370918s" podCreationTimestamp="2025-11-28 15:43:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:44.883841346 +0000 UTC m=+1464.446625177" watchObservedRunningTime="2025-11-28 15:43:44.891370918 +0000 UTC m=+1464.454154729"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.711097 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.849556 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7721626-591f-4801-a8bd-0599b7a9bc80-logs\") pod \"c7721626-591f-4801-a8bd-0599b7a9bc80\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") "
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.849833 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-combined-ca-bundle\") pod \"c7721626-591f-4801-a8bd-0599b7a9bc80\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") "
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.849929 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-config-data\") pod \"c7721626-591f-4801-a8bd-0599b7a9bc80\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") "
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.850039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gb4b\" (UniqueName: \"kubernetes.io/projected/c7721626-591f-4801-a8bd-0599b7a9bc80-kube-api-access-7gb4b\") pod \"c7721626-591f-4801-a8bd-0599b7a9bc80\" (UID: \"c7721626-591f-4801-a8bd-0599b7a9bc80\") "
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.852315 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7721626-591f-4801-a8bd-0599b7a9bc80-logs" (OuterVolumeSpecName: "logs") pod "c7721626-591f-4801-a8bd-0599b7a9bc80" (UID: "c7721626-591f-4801-a8bd-0599b7a9bc80"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.859357 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7721626-591f-4801-a8bd-0599b7a9bc80-kube-api-access-7gb4b" (OuterVolumeSpecName: "kube-api-access-7gb4b") pod "c7721626-591f-4801-a8bd-0599b7a9bc80" (UID: "c7721626-591f-4801-a8bd-0599b7a9bc80"). InnerVolumeSpecName "kube-api-access-7gb4b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.860270 4884 generic.go:334] "Generic (PLEG): container finished" podID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerID="83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af" exitCode=0
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.860327 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7721626-591f-4801-a8bd-0599b7a9bc80","Type":"ContainerDied","Data":"83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af"}
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.860352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7721626-591f-4801-a8bd-0599b7a9bc80","Type":"ContainerDied","Data":"185ebc782e52f756b5e076a40d9ac489d10c3708b8149f4bd091bdc7a5e5964e"}
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.860368 4884 scope.go:117] "RemoveContainer" containerID="83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.860484 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.863284 4884 generic.go:334] "Generic (PLEG): container finished" podID="f52b4235-2446-4735-afa6-83e6e4e57da1" containerID="329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661" exitCode=0
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.864130 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52b4235-2446-4735-afa6-83e6e4e57da1","Type":"ContainerDied","Data":"329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661"}
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.875921 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7721626-591f-4801-a8bd-0599b7a9bc80" (UID: "c7721626-591f-4801-a8bd-0599b7a9bc80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.882064 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-config-data" (OuterVolumeSpecName: "config-data") pod "c7721626-591f-4801-a8bd-0599b7a9bc80" (UID: "c7721626-591f-4801-a8bd-0599b7a9bc80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.912449 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.915365 4884 scope.go:117] "RemoveContainer" containerID="f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.940013 4884 scope.go:117] "RemoveContainer" containerID="83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af"
Nov 28 15:43:45 crc kubenswrapper[4884]: E1128 15:43:45.980660 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af\": container with ID starting with 83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af not found: ID does not exist" containerID="83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.980714 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af"} err="failed to get container status \"83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af\": rpc error: code = NotFound desc = could not find container \"83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af\": container with ID starting with 83940dae6e09a895c8d59ded28cfa5c47ab37b00d2fb972077bad0442274a5af not found: ID does not exist"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.980740 4884 scope.go:117] "RemoveContainer" containerID="f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379"
Nov 28 15:43:45 crc kubenswrapper[4884]: E1128 15:43:45.981203 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379\": container with ID starting with f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379 not found: ID does not exist" containerID="f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.981237 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379"} err="failed to get container status \"f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379\": rpc error: code = NotFound desc = could not find container \"f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379\": container with ID starting with f86fe3d39239c73cc6f855468f0f63a11ffd0aa5200423245868d2866090d379 not found: ID does not exist"
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.982799 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gb4b\" (UniqueName: \"kubernetes.io/projected/c7721626-591f-4801-a8bd-0599b7a9bc80-kube-api-access-7gb4b\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.982821 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7721626-591f-4801-a8bd-0599b7a9bc80-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.982832 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:45 crc kubenswrapper[4884]: I1128 15:43:45.982843 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7721626-591f-4801-a8bd-0599b7a9bc80-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.083823 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-combined-ca-bundle\") pod \"f52b4235-2446-4735-afa6-83e6e4e57da1\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") "
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.083922 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4bgz\" (UniqueName: \"kubernetes.io/projected/f52b4235-2446-4735-afa6-83e6e4e57da1-kube-api-access-s4bgz\") pod \"f52b4235-2446-4735-afa6-83e6e4e57da1\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") "
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.084047 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-config-data\") pod \"f52b4235-2446-4735-afa6-83e6e4e57da1\" (UID: \"f52b4235-2446-4735-afa6-83e6e4e57da1\") "
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.087214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f52b4235-2446-4735-afa6-83e6e4e57da1-kube-api-access-s4bgz" (OuterVolumeSpecName: "kube-api-access-s4bgz") pod "f52b4235-2446-4735-afa6-83e6e4e57da1" (UID: "f52b4235-2446-4735-afa6-83e6e4e57da1"). InnerVolumeSpecName "kube-api-access-s4bgz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.106935 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-config-data" (OuterVolumeSpecName: "config-data") pod "f52b4235-2446-4735-afa6-83e6e4e57da1" (UID: "f52b4235-2446-4735-afa6-83e6e4e57da1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.128909 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f52b4235-2446-4735-afa6-83e6e4e57da1" (UID: "f52b4235-2446-4735-afa6-83e6e4e57da1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.186775 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4bgz\" (UniqueName: \"kubernetes.io/projected/f52b4235-2446-4735-afa6-83e6e4e57da1-kube-api-access-s4bgz\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.186810 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.186847 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52b4235-2446-4735-afa6-83e6e4e57da1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.196929 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.208285 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.231346 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: E1128 15:43:46.231731 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f52b4235-2446-4735-afa6-83e6e4e57da1" containerName="nova-scheduler-scheduler"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.231747 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f52b4235-2446-4735-afa6-83e6e4e57da1" containerName="nova-scheduler-scheduler"
Nov 28 15:43:46 crc kubenswrapper[4884]: E1128 15:43:46.231792 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-log"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.231799 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-log"
Nov 28 15:43:46 crc kubenswrapper[4884]: E1128 15:43:46.231813 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-api"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.231819 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-api"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.231981 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-api"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.232007 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" containerName="nova-api-log"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.232023 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f52b4235-2446-4735-afa6-83e6e4e57da1" containerName="nova-scheduler-scheduler"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.232999 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.235517 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.248468 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.390903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-config-data\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.391000 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.391103 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3705d656-ed62-4288-ba3e-f10fc1298185-logs\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.391382 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px257\" (UniqueName: \"kubernetes.io/projected/3705d656-ed62-4288-ba3e-f10fc1298185-kube-api-access-px257\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.493333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3705d656-ed62-4288-ba3e-f10fc1298185-logs\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.493518 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px257\" (UniqueName: \"kubernetes.io/projected/3705d656-ed62-4288-ba3e-f10fc1298185-kube-api-access-px257\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.493606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-config-data\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.493732 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.495551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3705d656-ed62-4288-ba3e-f10fc1298185-logs\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.508356 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-config-data\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.508528 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.513411 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px257\" (UniqueName: \"kubernetes.io/projected/3705d656-ed62-4288-ba3e-f10fc1298185-kube-api-access-px257\") pod \"nova-api-0\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.568698 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.700009 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7721626-591f-4801-a8bd-0599b7a9bc80" path="/var/lib/kubelet/pods/c7721626-591f-4801-a8bd-0599b7a9bc80/volumes"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.874807 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52b4235-2446-4735-afa6-83e6e4e57da1","Type":"ContainerDied","Data":"31a82e3b8c0336d832fdc9d917735949acb98090ab0c3fd14f9ebea294494f44"}
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.874849 4884 scope.go:117] "RemoveContainer" containerID="329e82f36f8063cf0a7cbdf7732599886050776e9e49386d967c57e563e12661"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.874877 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.894254 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.910464 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.922564 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.924097 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.927278 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 15:43:46 crc kubenswrapper[4884]: I1128 15:43:46.933184 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.003468 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:43:47 crc kubenswrapper[4884]: W1128 15:43:47.004663 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3705d656_ed62_4288_ba3e_f10fc1298185.slice/crio-cc858e2c6694a742796a87ee9754e2433ac6e91dcd1128113c6651deff6db227 WatchSource:0}: Error finding container cc858e2c6694a742796a87ee9754e2433ac6e91dcd1128113c6651deff6db227: Status 404 returned error can't find the container with id cc858e2c6694a742796a87ee9754e2433ac6e91dcd1128113c6651deff6db227
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.104408 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.104532 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmrtn\" (UniqueName: \"kubernetes.io/projected/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-kube-api-access-wmrtn\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.104568 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-config-data\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.207155 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmrtn\" (UniqueName: \"kubernetes.io/projected/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-kube-api-access-wmrtn\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.207292 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-config-data\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.207644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.212352 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-config-data\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.212367 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.225330 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmrtn\" (UniqueName: \"kubernetes.io/projected/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-kube-api-access-wmrtn\") pod \"nova-scheduler-0\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.227986 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.240602 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.704843 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.890344 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8ffbf33c-053c-4b3b-b5ff-93c2391d4170","Type":"ContainerStarted","Data":"e7a44241613e1dddf037308ac1d4ed0de5ef27bd2fb3febbb1e7834558cd94ce"}
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.907759 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3705d656-ed62-4288-ba3e-f10fc1298185","Type":"ContainerStarted","Data":"b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1"}
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.908133 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3705d656-ed62-4288-ba3e-f10fc1298185","Type":"ContainerStarted","Data":"10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502"}
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.908297 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3705d656-ed62-4288-ba3e-f10fc1298185","Type":"ContainerStarted","Data":"cc858e2c6694a742796a87ee9754e2433ac6e91dcd1128113c6651deff6db227"}
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.924767 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.9247445939999999 podStartE2EDuration="1.924744594s" podCreationTimestamp="2025-11-28 15:43:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:47.903674396 +0000 UTC m=+1467.466458197" watchObservedRunningTime="2025-11-28 15:43:47.924744594 +0000 UTC m=+1467.487528395"
Nov 28 15:43:47 crc kubenswrapper[4884]: I1128 15:43:47.948543 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.948527908 podStartE2EDuration="1.948527908s" podCreationTimestamp="2025-11-28 15:43:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:43:47.935253058 +0000 UTC m=+1467.498036879" watchObservedRunningTime="2025-11-28 15:43:47.948527908 +0000 UTC m=+1467.511311709"
Nov 28 15:43:48 crc kubenswrapper[4884]: I1128 15:43:48.300927 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 15:43:48 crc kubenswrapper[4884]: I1128 15:43:48.300979 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 15:43:48 crc kubenswrapper[4884]: I1128 15:43:48.709731 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f52b4235-2446-4735-afa6-83e6e4e57da1" path="/var/lib/kubelet/pods/f52b4235-2446-4735-afa6-83e6e4e57da1/volumes"
Nov 28 15:43:48 crc kubenswrapper[4884]: I1128 15:43:48.923692 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8ffbf33c-053c-4b3b-b5ff-93c2391d4170","Type":"ContainerStarted","Data":"cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10"}
Nov 28 15:43:52 crc kubenswrapper[4884]: I1128 15:43:52.242171 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 15:43:53 crc kubenswrapper[4884]: I1128 15:43:53.301908 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 15:43:53 crc kubenswrapper[4884]: I1128 15:43:53.302285 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 15:43:54 crc kubenswrapper[4884]: I1128 15:43:54.320315 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:43:54 crc kubenswrapper[4884]: I1128 15:43:54.320364 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:43:56 crc kubenswrapper[4884]: I1128 15:43:56.569497 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 15:43:56 crc kubenswrapper[4884]: I1128 15:43:56.570146 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 15:43:57 crc kubenswrapper[4884]: I1128 15:43:57.242582 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 15:43:57 crc kubenswrapper[4884]: I1128 15:43:57.287086 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 15:43:57 crc kubenswrapper[4884]: I1128 15:43:57.652333 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:43:57 crc kubenswrapper[4884]: I1128 15:43:57.652320 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.193:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 15:43:58 crc kubenswrapper[4884]: I1128 15:43:58.083429 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 15:44:01 crc kubenswrapper[4884]: I1128 15:44:01.270511 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 15:44:03 crc kubenswrapper[4884]: I1128 15:44:03.307777 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 15:44:03 crc kubenswrapper[4884]: I1128 15:44:03.309904 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 15:44:03 crc kubenswrapper[4884]: I1128 15:44:03.315293 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 15:44:04 crc kubenswrapper[4884]: I1128 15:44:04.127156 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.134898 4884 generic.go:334] "Generic (PLEG): container finished" podID="cc473f47-75ab-4666-a51e-14a7222c2383" containerID="423fc717ca99c44555115e638f9599ae77f0b956b2f33212fa986e4d737e108b" exitCode=137
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.135002 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cc473f47-75ab-4666-a51e-14a7222c2383","Type":"ContainerDied","Data":"423fc717ca99c44555115e638f9599ae77f0b956b2f33212fa986e4d737e108b"}
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.135222 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"cc473f47-75ab-4666-a51e-14a7222c2383","Type":"ContainerDied","Data":"e1d729e399f243ead864801c6198a28c928f09155449e3776c6bfe31dbca12f7"}
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.135241 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1d729e399f243ead864801c6198a28c928f09155449e3776c6bfe31dbca12f7"
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.169645 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.330445 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-combined-ca-bundle\") pod \"cc473f47-75ab-4666-a51e-14a7222c2383\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") "
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.330627 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gzht\" (UniqueName: \"kubernetes.io/projected/cc473f47-75ab-4666-a51e-14a7222c2383-kube-api-access-7gzht\") pod \"cc473f47-75ab-4666-a51e-14a7222c2383\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") "
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.330835 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-config-data\") pod \"cc473f47-75ab-4666-a51e-14a7222c2383\" (UID: \"cc473f47-75ab-4666-a51e-14a7222c2383\") "
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.336128 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc473f47-75ab-4666-a51e-14a7222c2383-kube-api-access-7gzht" (OuterVolumeSpecName: "kube-api-access-7gzht") pod "cc473f47-75ab-4666-a51e-14a7222c2383" (UID: "cc473f47-75ab-4666-a51e-14a7222c2383"). InnerVolumeSpecName "kube-api-access-7gzht". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.355919 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc473f47-75ab-4666-a51e-14a7222c2383" (UID: "cc473f47-75ab-4666-a51e-14a7222c2383"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.356271 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-config-data" (OuterVolumeSpecName: "config-data") pod "cc473f47-75ab-4666-a51e-14a7222c2383" (UID: "cc473f47-75ab-4666-a51e-14a7222c2383"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.434385 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.434600 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gzht\" (UniqueName: \"kubernetes.io/projected/cc473f47-75ab-4666-a51e-14a7222c2383-kube-api-access-7gzht\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:05 crc kubenswrapper[4884]: I1128 15:44:05.434666 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc473f47-75ab-4666-a51e-14a7222c2383-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.148433 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.202517 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.234109 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.246168 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:44:06 crc kubenswrapper[4884]: E1128 15:44:06.246834 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc473f47-75ab-4666-a51e-14a7222c2383" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.246875 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc473f47-75ab-4666-a51e-14a7222c2383" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.247341 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc473f47-75ab-4666-a51e-14a7222c2383" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.250666 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.253607 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.265565 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.265855 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.266072 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.350363 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.350475 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzt8n\" (UniqueName: \"kubernetes.io/projected/08c698b7-a3f8-4639-8237-a8e005ae2669-kube-api-access-xzt8n\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.350503 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.350544 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.350881 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.453129 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.453212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.453240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzt8n\" (UniqueName: \"kubernetes.io/projected/08c698b7-a3f8-4639-8237-a8e005ae2669-kube-api-access-xzt8n\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.453277 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.453358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.457482 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.458214 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.458269 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-config-data\") pod
\"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.458670 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.478810 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzt8n\" (UniqueName: \"kubernetes.io/projected/08c698b7-a3f8-4639-8237-a8e005ae2669-kube-api-access-xzt8n\") pod \"nova-cell1-novncproxy-0\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.572867 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.573282 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.574303 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.577110 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.583670 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:44:06 crc kubenswrapper[4884]: I1128 15:44:06.723035 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc473f47-75ab-4666-a51e-14a7222c2383" path="/var/lib/kubelet/pods/cc473f47-75ab-4666-a51e-14a7222c2383/volumes" Nov 28 15:44:07 crc kubenswrapper[4884]: W1128 15:44:07.057544 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08c698b7_a3f8_4639_8237_a8e005ae2669.slice/crio-315407436e5b0de387bb6fd1fa2da1474c06c9ef9e43c4a9b23b873c40171369 WatchSource:0}: Error finding container 315407436e5b0de387bb6fd1fa2da1474c06c9ef9e43c4a9b23b873c40171369: Status 404 returned error can't find the container with id 315407436e5b0de387bb6fd1fa2da1474c06c9ef9e43c4a9b23b873c40171369 Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.065154 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.162867 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"08c698b7-a3f8-4639-8237-a8e005ae2669","Type":"ContainerStarted","Data":"315407436e5b0de387bb6fd1fa2da1474c06c9ef9e43c4a9b23b873c40171369"} Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.163309 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.170497 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.361502 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-4mp84"] Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.369478 4884 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.381814 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-4mp84"] Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.475322 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.475437 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.475494 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.475519 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.475786 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-config\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.475827 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnspd\" (UniqueName: \"kubernetes.io/projected/006d76b7-d405-4056-a55b-f01661cde456-kube-api-access-pnspd\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.576947 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.577492 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.577588 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-config\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.577608 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnspd\" (UniqueName: \"kubernetes.io/projected/006d76b7-d405-4056-a55b-f01661cde456-kube-api-access-pnspd\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.577633 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.577687 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.577783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.578243 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.581480 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.581527 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.582732 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-config\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.603230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnspd\" (UniqueName: 
\"kubernetes.io/projected/006d76b7-d405-4056-a55b-f01661cde456-kube-api-access-pnspd\") pod \"dnsmasq-dns-5c7b6c5df9-4mp84\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:07 crc kubenswrapper[4884]: I1128 15:44:07.694151 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:08 crc kubenswrapper[4884]: I1128 15:44:08.176660 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"08c698b7-a3f8-4639-8237-a8e005ae2669","Type":"ContainerStarted","Data":"763ed99f997eeeb0d08695603ce8409930b1a454f8319f482b75714ea0827268"} Nov 28 15:44:08 crc kubenswrapper[4884]: I1128 15:44:08.197874 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.197858578 podStartE2EDuration="2.197858578s" podCreationTimestamp="2025-11-28 15:44:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:08.194628119 +0000 UTC m=+1487.757411920" watchObservedRunningTime="2025-11-28 15:44:08.197858578 +0000 UTC m=+1487.760642379" Nov 28 15:44:08 crc kubenswrapper[4884]: I1128 15:44:08.215840 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-4mp84"] Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.156998 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.157590 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-central-agent" containerID="cri-o://0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9" gracePeriod=30 Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.157709 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="proxy-httpd" containerID="cri-o://90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73" gracePeriod=30 Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.157784 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="sg-core" containerID="cri-o://9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a" gracePeriod=30 Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.157875 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-notification-agent" containerID="cri-o://a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd" gracePeriod=30 Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.187713 4884 generic.go:334] "Generic (PLEG): container finished" podID="006d76b7-d405-4056-a55b-f01661cde456" containerID="1848c2fd98ec9c6cc4e03c50a5728558a64d191560cff7ff8ba31f079d3152a9" exitCode=0 Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.187815 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" 
event={"ID":"006d76b7-d405-4056-a55b-f01661cde456","Type":"ContainerDied","Data":"1848c2fd98ec9c6cc4e03c50a5728558a64d191560cff7ff8ba31f079d3152a9"} Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.187869 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" event={"ID":"006d76b7-d405-4056-a55b-f01661cde456","Type":"ContainerStarted","Data":"85d5d24d88e32819495a90b2605529d70054f09d61bf3ea16d4a41a2d6121778"} Nov 28 15:44:09 crc kubenswrapper[4884]: I1128 15:44:09.826529 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.206178 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" event={"ID":"006d76b7-d405-4056-a55b-f01661cde456","Type":"ContainerStarted","Data":"8bd58f8dfb10dddf860b7726450d9b6180488f6c7bec696c42d57ea2a88b8b6e"} Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.208439 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.219364 4884 generic.go:334] "Generic (PLEG): container finished" podID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerID="90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73" exitCode=0 Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.219397 4884 generic.go:334] "Generic (PLEG): container finished" podID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerID="9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a" exitCode=2 Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.219407 4884 generic.go:334] "Generic (PLEG): container finished" podID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerID="0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9" exitCode=0 Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.219617 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-log" containerID="cri-o://10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502" gracePeriod=30 Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.220240 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerDied","Data":"90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73"} Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.220296 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerDied","Data":"9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a"} Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.220305 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerDied","Data":"0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9"} Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.220374 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-api" containerID="cri-o://b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1" gracePeriod=30 Nov 28 15:44:10 crc kubenswrapper[4884]: I1128 15:44:10.243850 4884 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" podStartSLOduration=3.243824874 podStartE2EDuration="3.243824874s" podCreationTimestamp="2025-11-28 15:44:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:10.231711911 +0000 UTC m=+1489.794495712" watchObservedRunningTime="2025-11-28 15:44:10.243824874 +0000 UTC m=+1489.806608695" Nov 28 15:44:11 crc kubenswrapper[4884]: I1128 15:44:11.230313 4884 generic.go:334] "Generic (PLEG): container finished" podID="3705d656-ed62-4288-ba3e-f10fc1298185" containerID="10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502" exitCode=143 Nov 28 15:44:11 crc kubenswrapper[4884]: I1128 15:44:11.230398 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3705d656-ed62-4288-ba3e-f10fc1298185","Type":"ContainerDied","Data":"10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502"} Nov 28 15:44:11 crc kubenswrapper[4884]: I1128 15:44:11.577543 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.213117 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.254539 4884 generic.go:334] "Generic (PLEG): container finished" podID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerID="a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd" exitCode=0 Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.255049 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.255563 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerDied","Data":"a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd"} Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.255773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2","Type":"ContainerDied","Data":"1e39c8ec3f6e545e451da5047087ddefaccf509123f49654450c7fb5dbf4fccb"} Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.255832 4884 scope.go:117] "RemoveContainer" containerID="90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.282521 4884 scope.go:117] "RemoveContainer" containerID="9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.306401 4884 scope.go:117] "RemoveContainer" containerID="a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.330780 4884 scope.go:117] "RemoveContainer" containerID="0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.355401 4884 scope.go:117] "RemoveContainer" containerID="90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.355824 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73\": container with 
ID starting with 90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73 not found: ID does not exist" containerID="90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.355856 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73"} err="failed to get container status \"90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73\": rpc error: code = NotFound desc = could not find container \"90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73\": container with ID starting with 90e12f0d5b1993adbc6af31d241e330e8a2ce1f3f89dc06060a3ef6ca4458f73 not found: ID does not exist" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.355876 4884 scope.go:117] "RemoveContainer" containerID="9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.356448 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a\": container with ID starting with 9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a not found: ID does not exist" containerID="9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.356473 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a"} err="failed to get container status \"9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a\": rpc error: code = NotFound desc = could not find container \"9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a\": container with ID starting with 9d0c59d3695a3908f5ab1f353645e5716a800cc26ca1947a1aee0613a59c107a not found: ID does not exist" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.356489 4884 scope.go:117] "RemoveContainer" containerID="a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.356886 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd\": container with ID starting with a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd not found: ID does not exist" containerID="a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.356908 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd"} err="failed to get container status \"a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd\": rpc error: code = NotFound desc = could not find container \"a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd\": container with ID starting with a11dac6001c5c18b7787df99b7b03883ae0b41344560269c8908fce2edb0accd not found: ID does not exist" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.356925 4884 scope.go:117] "RemoveContainer" containerID="0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.357490 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9\": container with ID starting with 0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9 not found: ID does not exist" containerID="0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.357508 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9"} err="failed to get container status \"0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9\": rpc error: code = NotFound desc = could not find container \"0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9\": container with ID starting with 0bfbc6fb875f508ffdb676828a359417252905cde51b49af63acb8d676cdf4d9 not found: ID does not exist" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375354 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r85dl\" (UniqueName: \"kubernetes.io/projected/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-kube-api-access-r85dl\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375461 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-ceilometer-tls-certs\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375510 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-config-data\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375816 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-sg-core-conf-yaml\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375869 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-run-httpd\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375902 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-combined-ca-bundle\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375970 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-log-httpd\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.375996 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-scripts\") pod \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\" (UID: \"78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2\") " Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.378259 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.379638 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.383970 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-scripts" (OuterVolumeSpecName: "scripts") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.386361 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-kube-api-access-r85dl" (OuterVolumeSpecName: "kube-api-access-r85dl") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "kube-api-access-r85dl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.410920 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.443159 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.474625 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478049 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478228 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r85dl\" (UniqueName: \"kubernetes.io/projected/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-kube-api-access-r85dl\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478311 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478386 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478458 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478527 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.478610 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.491484 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-config-data" (OuterVolumeSpecName: "config-data") pod "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" (UID: "78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.580407 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.602240 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.614079 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.662944 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.663454 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="sg-core" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663478 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="sg-core" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.663495 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="proxy-httpd" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663502 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="proxy-httpd" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.663511 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-notification-agent" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663518 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-notification-agent" Nov 28 15:44:12 crc kubenswrapper[4884]: E1128 15:44:12.663555 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-central-agent" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663561 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-central-agent" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663732 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-notification-agent" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663760 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="ceilometer-central-agent" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663775 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="proxy-httpd" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.663793 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" containerName="sg-core" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.666010 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.670432 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.670758 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.671030 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.708443 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2" path="/var/lib/kubelet/pods/78e80f6b-bdb1-4fd7-9a51-7bc7b71086c2/volumes" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.709207 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784244 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-scripts\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784312 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxpd2\" (UniqueName: \"kubernetes.io/projected/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-kube-api-access-xxpd2\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784347 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784370 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-log-httpd\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784476 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784519 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-run-httpd\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784542 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " 
pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.784660 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-config-data\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.877413 4884 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod41a05e91-6bd1-426a-b9d1-f89fb9ce3d76"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod41a05e91-6bd1-426a-b9d1-f89fb9ce3d76] : Timed out while waiting for systemd to remove kubepods-besteffort-pod41a05e91_6bd1_426a_b9d1_f89fb9ce3d76.slice" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886235 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886294 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-run-httpd\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886378 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-config-data\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886456 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-scripts\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886483 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxpd2\" (UniqueName: \"kubernetes.io/projected/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-kube-api-access-xxpd2\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886624 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-log-httpd\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886931 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-run-httpd\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.886994 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-log-httpd\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.890230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-config-data\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.890958 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-scripts\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.891080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.890701 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.896828 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.908376 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxpd2\" (UniqueName: \"kubernetes.io/projected/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-kube-api-access-xxpd2\") pod \"ceilometer-0\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " pod="openstack/ceilometer-0" Nov 28 15:44:12 crc kubenswrapper[4884]: I1128 15:44:12.998933 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.297991 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:44:13 crc kubenswrapper[4884]: W1128 15:44:13.310351 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91c5a29a_fbc0_4c91_a3a3_e4d96c911c98.slice/crio-d523e316947963f98d42fc3f76f1e604a0f39747037e31c56ae19f0f8aa9da85 WatchSource:0}: Error finding container d523e316947963f98d42fc3f76f1e604a0f39747037e31c56ae19f0f8aa9da85: Status 404 returned error can't find the container with id d523e316947963f98d42fc3f76f1e604a0f39747037e31c56ae19f0f8aa9da85 Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.313939 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.414765 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-66vj2"] Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.434698 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.461405 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-66vj2"] Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.604864 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-utilities\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.605191 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgj55\" (UniqueName: \"kubernetes.io/projected/a5fcd4e5-5661-4910-bb7d-633c6a08882c-kube-api-access-jgj55\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.605234 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-catalog-content\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.707487 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgj55\" (UniqueName: \"kubernetes.io/projected/a5fcd4e5-5661-4910-bb7d-633c6a08882c-kube-api-access-jgj55\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.708426 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-catalog-content\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.708868 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-utilities\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.709444 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-catalog-content\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.709700 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-utilities\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.731120 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgj55\" (UniqueName: \"kubernetes.io/projected/a5fcd4e5-5661-4910-bb7d-633c6a08882c-kube-api-access-jgj55\") pod \"redhat-operators-66vj2\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.772288 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.775938 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.914752 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-config-data\") pod \"3705d656-ed62-4288-ba3e-f10fc1298185\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.915323 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3705d656-ed62-4288-ba3e-f10fc1298185-logs\") pod \"3705d656-ed62-4288-ba3e-f10fc1298185\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.915350 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-combined-ca-bundle\") pod \"3705d656-ed62-4288-ba3e-f10fc1298185\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.915455 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px257\" (UniqueName: \"kubernetes.io/projected/3705d656-ed62-4288-ba3e-f10fc1298185-kube-api-access-px257\") pod \"3705d656-ed62-4288-ba3e-f10fc1298185\" (UID: \"3705d656-ed62-4288-ba3e-f10fc1298185\") " Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.916173 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3705d656-ed62-4288-ba3e-f10fc1298185-logs" (OuterVolumeSpecName: "logs") pod "3705d656-ed62-4288-ba3e-f10fc1298185" (UID: "3705d656-ed62-4288-ba3e-f10fc1298185"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.925769 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3705d656-ed62-4288-ba3e-f10fc1298185-kube-api-access-px257" (OuterVolumeSpecName: "kube-api-access-px257") pod "3705d656-ed62-4288-ba3e-f10fc1298185" (UID: "3705d656-ed62-4288-ba3e-f10fc1298185"). InnerVolumeSpecName "kube-api-access-px257". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.978442 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3705d656-ed62-4288-ba3e-f10fc1298185" (UID: "3705d656-ed62-4288-ba3e-f10fc1298185"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:13 crc kubenswrapper[4884]: I1128 15:44:13.982559 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-config-data" (OuterVolumeSpecName: "config-data") pod "3705d656-ed62-4288-ba3e-f10fc1298185" (UID: "3705d656-ed62-4288-ba3e-f10fc1298185"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.018103 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.018136 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3705d656-ed62-4288-ba3e-f10fc1298185-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.018145 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3705d656-ed62-4288-ba3e-f10fc1298185-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.018158 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px257\" (UniqueName: \"kubernetes.io/projected/3705d656-ed62-4288-ba3e-f10fc1298185-kube-api-access-px257\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.295536 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerStarted","Data":"5f7512ac40024faf5c002b3ea07aae281fd765dc79598f4d2e49a37be1071a24"} Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.295991 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerStarted","Data":"d523e316947963f98d42fc3f76f1e604a0f39747037e31c56ae19f0f8aa9da85"} Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.297664 4884 generic.go:334] "Generic (PLEG): container finished" podID="3705d656-ed62-4288-ba3e-f10fc1298185" containerID="b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1" exitCode=0 Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.297695 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"3705d656-ed62-4288-ba3e-f10fc1298185","Type":"ContainerDied","Data":"b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1"} Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.297711 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3705d656-ed62-4288-ba3e-f10fc1298185","Type":"ContainerDied","Data":"cc858e2c6694a742796a87ee9754e2433ac6e91dcd1128113c6651deff6db227"} Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.297726 4884 scope.go:117] "RemoveContainer" containerID="b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.297836 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.318463 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-66vj2"] Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.321445 4884 scope.go:117] "RemoveContainer" containerID="10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.343404 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.363423 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.372466 4884 scope.go:117] "RemoveContainer" containerID="b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1" Nov 28 15:44:14 crc kubenswrapper[4884]: E1128 15:44:14.372892 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1\": container with ID starting with b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1 not found: ID does not exist" containerID="b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.372930 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1"} err="failed to get container status \"b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1\": rpc error: code = NotFound desc = could not find container \"b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1\": container with ID starting with b3811f5e5ccf9ec7ac8a129a0419b320312f2f0c030ec3f81bc4b2b06adf86b1 not found: ID does not exist" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.372957 4884 scope.go:117] "RemoveContainer" containerID="10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502" Nov 28 15:44:14 crc kubenswrapper[4884]: E1128 15:44:14.373223 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502\": container with ID starting with 10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502 not found: ID does not exist" containerID="10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.373253 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502"} 
err="failed to get container status \"10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502\": rpc error: code = NotFound desc = could not find container \"10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502\": container with ID starting with 10e129d1cf7db10eb387ea2ba5fcc7938fc319cf18f7db97f5903e2383898502 not found: ID does not exist" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.375149 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:14 crc kubenswrapper[4884]: E1128 15:44:14.375645 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-api" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.375666 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-api" Nov 28 15:44:14 crc kubenswrapper[4884]: E1128 15:44:14.375683 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-log" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.375691 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-log" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.375891 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-api" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.375935 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" containerName="nova-api-log" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.376877 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.383656 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.384219 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.384498 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.385153 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.527069 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-config-data\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.528649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-public-tls-certs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.528713 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.528773 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvqfj\" (UniqueName: \"kubernetes.io/projected/a61d4d21-a654-4b24-9a4e-00d9ed08297e-kube-api-access-rvqfj\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.528883 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.529323 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a61d4d21-a654-4b24-9a4e-00d9ed08297e-logs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.630947 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.631114 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a61d4d21-a654-4b24-9a4e-00d9ed08297e-logs\") pod \"nova-api-0\" (UID: 
\"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.631173 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-config-data\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.631198 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-public-tls-certs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.631221 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.631250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvqfj\" (UniqueName: \"kubernetes.io/projected/a61d4d21-a654-4b24-9a4e-00d9ed08297e-kube-api-access-rvqfj\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.638900 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.639486 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-config-data\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.639904 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-public-tls-certs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.640150 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a61d4d21-a654-4b24-9a4e-00d9ed08297e-logs\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.644880 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.656698 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvqfj\" (UniqueName: \"kubernetes.io/projected/a61d4d21-a654-4b24-9a4e-00d9ed08297e-kube-api-access-rvqfj\") pod \"nova-api-0\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " 
pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.694345 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:14 crc kubenswrapper[4884]: I1128 15:44:14.700163 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3705d656-ed62-4288-ba3e-f10fc1298185" path="/var/lib/kubelet/pods/3705d656-ed62-4288-ba3e-f10fc1298185/volumes" Nov 28 15:44:15 crc kubenswrapper[4884]: W1128 15:44:15.207259 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda61d4d21_a654_4b24_9a4e_00d9ed08297e.slice/crio-0bf090b43fd1e326c170f9a417d6560fbcf3ba50270fbe95c7e5990598e69352 WatchSource:0}: Error finding container 0bf090b43fd1e326c170f9a417d6560fbcf3ba50270fbe95c7e5990598e69352: Status 404 returned error can't find the container with id 0bf090b43fd1e326c170f9a417d6560fbcf3ba50270fbe95c7e5990598e69352 Nov 28 15:44:15 crc kubenswrapper[4884]: I1128 15:44:15.210735 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:15 crc kubenswrapper[4884]: I1128 15:44:15.315661 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerStarted","Data":"b0e0f537c987f444034b20d830933a5ee4e9b9836a56cfdf4c62f7d8b6d604d0"} Nov 28 15:44:15 crc kubenswrapper[4884]: I1128 15:44:15.319642 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerID="2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2" exitCode=0 Nov 28 15:44:15 crc kubenswrapper[4884]: I1128 15:44:15.319809 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerDied","Data":"2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2"} Nov 28 15:44:15 crc kubenswrapper[4884]: I1128 15:44:15.319906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerStarted","Data":"0a1ae262d1747d5e07fa4d3465913515029152fb749b1f9702de775bd646bb13"} Nov 28 15:44:15 crc kubenswrapper[4884]: I1128 15:44:15.325442 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a61d4d21-a654-4b24-9a4e-00d9ed08297e","Type":"ContainerStarted","Data":"0bf090b43fd1e326c170f9a417d6560fbcf3ba50270fbe95c7e5990598e69352"} Nov 28 15:44:16 crc kubenswrapper[4884]: I1128 15:44:16.335665 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerStarted","Data":"77143d790300f24e1ec71423bf2b5cceb1697eca359224ae98446bd1168afb55"} Nov 28 15:44:16 crc kubenswrapper[4884]: I1128 15:44:16.337945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a61d4d21-a654-4b24-9a4e-00d9ed08297e","Type":"ContainerStarted","Data":"1abf921eaf64d303e283dac0f5fd09c6477854284f0e13530abfecf77429cf82"} Nov 28 15:44:16 crc kubenswrapper[4884]: I1128 15:44:16.337991 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a61d4d21-a654-4b24-9a4e-00d9ed08297e","Type":"ContainerStarted","Data":"9522c0e7e07853449867a248b47d2b1ba02a40bb318fbe036c5ef6afb8a941c9"} Nov 28 15:44:16 crc kubenswrapper[4884]: 
I1128 15:44:16.362618 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.362601008 podStartE2EDuration="2.362601008s" podCreationTimestamp="2025-11-28 15:44:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:16.357492832 +0000 UTC m=+1495.920276633" watchObservedRunningTime="2025-11-28 15:44:16.362601008 +0000 UTC m=+1495.925384809" Nov 28 15:44:16 crc kubenswrapper[4884]: I1128 15:44:16.578488 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:16 crc kubenswrapper[4884]: I1128 15:44:16.616898 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.353813 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerStarted","Data":"a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a"} Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.470335 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.610515 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-dtn4c"] Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.611657 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.620084 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.620486 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.623498 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhnpj\" (UniqueName: \"kubernetes.io/projected/1c349ee9-213b-41b3-9a78-b6a846ba244a-kube-api-access-fhnpj\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.623615 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-config-data\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.623678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-scripts\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.623740 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-combined-ca-bundle\") 
pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.635621 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dtn4c"] Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.697307 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.725592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-config-data\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.725647 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-scripts\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.725686 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.727114 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhnpj\" (UniqueName: \"kubernetes.io/projected/1c349ee9-213b-41b3-9a78-b6a846ba244a-kube-api-access-fhnpj\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.731456 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-scripts\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.732213 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-config-data\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.735038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.761719 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhnpj\" (UniqueName: \"kubernetes.io/projected/1c349ee9-213b-41b3-9a78-b6a846ba244a-kube-api-access-fhnpj\") pod \"nova-cell1-cell-mapping-dtn4c\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " 
pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.768069 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-89mq2"] Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.768310 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" podUID="9dde288e-b169-42f7-af64-647eef580099" containerName="dnsmasq-dns" containerID="cri-o://30f8a4d90aad4194121e096b6c45c4a7746bc54c18f144f7f144a9493b003d9e" gracePeriod=10 Nov 28 15:44:17 crc kubenswrapper[4884]: I1128 15:44:17.934507 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.367635 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerID="a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a" exitCode=0 Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.367726 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerDied","Data":"a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a"} Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.373167 4884 generic.go:334] "Generic (PLEG): container finished" podID="9dde288e-b169-42f7-af64-647eef580099" containerID="30f8a4d90aad4194121e096b6c45c4a7746bc54c18f144f7f144a9493b003d9e" exitCode=0 Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.374202 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" event={"ID":"9dde288e-b169-42f7-af64-647eef580099","Type":"ContainerDied","Data":"30f8a4d90aad4194121e096b6c45c4a7746bc54c18f144f7f144a9493b003d9e"} Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.433305 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-dtn4c"] Nov 28 15:44:18 crc kubenswrapper[4884]: W1128 15:44:18.441474 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c349ee9_213b_41b3_9a78_b6a846ba244a.slice/crio-52365be123d17d2a6fd8ac061aed79eced68da74f6508a51636054da8b4ce083 WatchSource:0}: Error finding container 52365be123d17d2a6fd8ac061aed79eced68da74f6508a51636054da8b4ce083: Status 404 returned error can't find the container with id 52365be123d17d2a6fd8ac061aed79eced68da74f6508a51636054da8b4ce083 Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.683152 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.754574 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.754632 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-config\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.754730 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27v27\" (UniqueName: \"kubernetes.io/projected/9dde288e-b169-42f7-af64-647eef580099-kube-api-access-27v27\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.754844 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-svc\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.754936 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-swift-storage-0\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.755069 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-sb\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.767899 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dde288e-b169-42f7-af64-647eef580099-kube-api-access-27v27" (OuterVolumeSpecName: "kube-api-access-27v27") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "kube-api-access-27v27". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.825735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.835523 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.843514 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.857218 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-config" (OuterVolumeSpecName: "config") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.857590 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858256 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb\") pod \"9dde288e-b169-42f7-af64-647eef580099\" (UID: \"9dde288e-b169-42f7-af64-647eef580099\") " Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858708 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858726 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858735 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27v27\" (UniqueName: \"kubernetes.io/projected/9dde288e-b169-42f7-af64-647eef580099-kube-api-access-27v27\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858746 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858756 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:18 crc kubenswrapper[4884]: W1128 15:44:18.858810 4884 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/9dde288e-b169-42f7-af64-647eef580099/volumes/kubernetes.io~configmap/ovsdbserver-nb Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.858818 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb" (OuterVolumeSpecName: 
"ovsdbserver-nb") pod "9dde288e-b169-42f7-af64-647eef580099" (UID: "9dde288e-b169-42f7-af64-647eef580099"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:44:18 crc kubenswrapper[4884]: I1128 15:44:18.961296 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9dde288e-b169-42f7-af64-647eef580099-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.382993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" event={"ID":"9dde288e-b169-42f7-af64-647eef580099","Type":"ContainerDied","Data":"b5e267bebc1a1476f792661862fac9d529759b08249ab34187709ef405116c75"} Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.383041 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-89mq2" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.383080 4884 scope.go:117] "RemoveContainer" containerID="30f8a4d90aad4194121e096b6c45c4a7746bc54c18f144f7f144a9493b003d9e" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.386886 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerStarted","Data":"4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce"} Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.388066 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.393468 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerStarted","Data":"8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d"} Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.395670 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dtn4c" event={"ID":"1c349ee9-213b-41b3-9a78-b6a846ba244a","Type":"ContainerStarted","Data":"c60a6f26a660bec0bac432aab6278c3112868da8289bd4e2d9af5e68fee5fb95"} Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.395697 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dtn4c" event={"ID":"1c349ee9-213b-41b3-9a78-b6a846ba244a","Type":"ContainerStarted","Data":"52365be123d17d2a6fd8ac061aed79eced68da74f6508a51636054da8b4ce083"} Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.410981 4884 scope.go:117] "RemoveContainer" containerID="3959288d20e50b1d754a23189e9cef53aa5c34cf9ec15292dbfa2b3606c62d76" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.445016 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.502690736 podStartE2EDuration="7.444991569s" podCreationTimestamp="2025-11-28 15:44:12 +0000 UTC" firstStartedPulling="2025-11-28 15:44:13.313659026 +0000 UTC m=+1492.876442827" lastFinishedPulling="2025-11-28 15:44:18.255959859 +0000 UTC m=+1497.818743660" observedRunningTime="2025-11-28 15:44:19.422766535 +0000 UTC m=+1498.985550356" watchObservedRunningTime="2025-11-28 15:44:19.444991569 +0000 UTC m=+1499.007775380" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.458667 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-89mq2"] Nov 28 15:44:19 crc 
kubenswrapper[4884]: I1128 15:44:19.473601 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-89mq2"] Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.477465 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-66vj2" podStartSLOduration=2.975171753 podStartE2EDuration="6.477444765s" podCreationTimestamp="2025-11-28 15:44:13 +0000 UTC" firstStartedPulling="2025-11-28 15:44:15.321611315 +0000 UTC m=+1494.884395116" lastFinishedPulling="2025-11-28 15:44:18.823884317 +0000 UTC m=+1498.386668128" observedRunningTime="2025-11-28 15:44:19.46134459 +0000 UTC m=+1499.024128401" watchObservedRunningTime="2025-11-28 15:44:19.477444765 +0000 UTC m=+1499.040228566" Nov 28 15:44:19 crc kubenswrapper[4884]: I1128 15:44:19.486895 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-dtn4c" podStartSLOduration=2.486879305 podStartE2EDuration="2.486879305s" podCreationTimestamp="2025-11-28 15:44:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:19.478679924 +0000 UTC m=+1499.041463725" watchObservedRunningTime="2025-11-28 15:44:19.486879305 +0000 UTC m=+1499.049663106" Nov 28 15:44:20 crc kubenswrapper[4884]: I1128 15:44:20.702569 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dde288e-b169-42f7-af64-647eef580099" path="/var/lib/kubelet/pods/9dde288e-b169-42f7-af64-647eef580099/volumes" Nov 28 15:44:23 crc kubenswrapper[4884]: I1128 15:44:23.773266 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:23 crc kubenswrapper[4884]: I1128 15:44:23.773771 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:24 crc kubenswrapper[4884]: I1128 15:44:24.451674 4884 generic.go:334] "Generic (PLEG): container finished" podID="1c349ee9-213b-41b3-9a78-b6a846ba244a" containerID="c60a6f26a660bec0bac432aab6278c3112868da8289bd4e2d9af5e68fee5fb95" exitCode=0 Nov 28 15:44:24 crc kubenswrapper[4884]: I1128 15:44:24.451718 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dtn4c" event={"ID":"1c349ee9-213b-41b3-9a78-b6a846ba244a","Type":"ContainerDied","Data":"c60a6f26a660bec0bac432aab6278c3112868da8289bd4e2d9af5e68fee5fb95"} Nov 28 15:44:24 crc kubenswrapper[4884]: I1128 15:44:24.701535 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:44:24 crc kubenswrapper[4884]: I1128 15:44:24.702313 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:44:24 crc kubenswrapper[4884]: I1128 15:44:24.819311 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-66vj2" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="registry-server" probeResult="failure" output=< Nov 28 15:44:24 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:44:24 crc kubenswrapper[4884]: > Nov 28 15:44:25 crc kubenswrapper[4884]: I1128 15:44:25.713614 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-api" probeResult="failure" output="Get 
\"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:44:25 crc kubenswrapper[4884]: I1128 15:44:25.713599 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.199:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:44:25 crc kubenswrapper[4884]: I1128 15:44:25.887683 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.018641 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-combined-ca-bundle\") pod \"1c349ee9-213b-41b3-9a78-b6a846ba244a\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.018824 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhnpj\" (UniqueName: \"kubernetes.io/projected/1c349ee9-213b-41b3-9a78-b6a846ba244a-kube-api-access-fhnpj\") pod \"1c349ee9-213b-41b3-9a78-b6a846ba244a\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.018960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-config-data\") pod \"1c349ee9-213b-41b3-9a78-b6a846ba244a\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.019005 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-scripts\") pod \"1c349ee9-213b-41b3-9a78-b6a846ba244a\" (UID: \"1c349ee9-213b-41b3-9a78-b6a846ba244a\") " Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.024784 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c349ee9-213b-41b3-9a78-b6a846ba244a-kube-api-access-fhnpj" (OuterVolumeSpecName: "kube-api-access-fhnpj") pod "1c349ee9-213b-41b3-9a78-b6a846ba244a" (UID: "1c349ee9-213b-41b3-9a78-b6a846ba244a"). InnerVolumeSpecName "kube-api-access-fhnpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.034010 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-scripts" (OuterVolumeSpecName: "scripts") pod "1c349ee9-213b-41b3-9a78-b6a846ba244a" (UID: "1c349ee9-213b-41b3-9a78-b6a846ba244a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.049244 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c349ee9-213b-41b3-9a78-b6a846ba244a" (UID: "1c349ee9-213b-41b3-9a78-b6a846ba244a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.052708 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-config-data" (OuterVolumeSpecName: "config-data") pod "1c349ee9-213b-41b3-9a78-b6a846ba244a" (UID: "1c349ee9-213b-41b3-9a78-b6a846ba244a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.123555 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.124035 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhnpj\" (UniqueName: \"kubernetes.io/projected/1c349ee9-213b-41b3-9a78-b6a846ba244a-kube-api-access-fhnpj\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.124217 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.124286 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1c349ee9-213b-41b3-9a78-b6a846ba244a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.469335 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-dtn4c" event={"ID":"1c349ee9-213b-41b3-9a78-b6a846ba244a","Type":"ContainerDied","Data":"52365be123d17d2a6fd8ac061aed79eced68da74f6508a51636054da8b4ce083"} Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.469374 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-dtn4c" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.469379 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52365be123d17d2a6fd8ac061aed79eced68da74f6508a51636054da8b4ce083" Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.679790 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.680117 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-log" containerID="cri-o://9522c0e7e07853449867a248b47d2b1ba02a40bb318fbe036c5ef6afb8a941c9" gracePeriod=30 Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.680290 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-api" containerID="cri-o://1abf921eaf64d303e283dac0f5fd09c6477854284f0e13530abfecf77429cf82" gracePeriod=30 Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.687122 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.687333 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" containerName="nova-scheduler-scheduler" containerID="cri-o://cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" gracePeriod=30 Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.725637 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.726500 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-log" containerID="cri-o://63149cdb78b61ff8605f380c5ac1f511b919aa21dd5606a1425d16d16f5cff95" gracePeriod=30 Nov 28 15:44:26 crc kubenswrapper[4884]: I1128 15:44:26.726713 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-metadata" containerID="cri-o://e3f5e31528ef3c065f96ad92bea080bc5996a8043131d95f1ee95074faa786e5" gracePeriod=30 Nov 28 15:44:27 crc kubenswrapper[4884]: E1128 15:44:27.247707 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:44:27 crc kubenswrapper[4884]: E1128 15:44:27.249235 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:44:27 crc kubenswrapper[4884]: E1128 15:44:27.250718 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:44:27 crc kubenswrapper[4884]: E1128 15:44:27.250751 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" containerName="nova-scheduler-scheduler" Nov 28 15:44:27 crc kubenswrapper[4884]: I1128 15:44:27.480598 4884 generic.go:334] "Generic (PLEG): container finished" podID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerID="9522c0e7e07853449867a248b47d2b1ba02a40bb318fbe036c5ef6afb8a941c9" exitCode=143 Nov 28 15:44:27 crc kubenswrapper[4884]: I1128 15:44:27.480643 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a61d4d21-a654-4b24-9a4e-00d9ed08297e","Type":"ContainerDied","Data":"9522c0e7e07853449867a248b47d2b1ba02a40bb318fbe036c5ef6afb8a941c9"} Nov 28 15:44:27 crc kubenswrapper[4884]: I1128 15:44:27.482641 4884 generic.go:334] "Generic (PLEG): container finished" podID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerID="63149cdb78b61ff8605f380c5ac1f511b919aa21dd5606a1425d16d16f5cff95" exitCode=143 Nov 28 15:44:27 crc kubenswrapper[4884]: I1128 15:44:27.482668 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6eb2f4f0-9527-4a35-a9c0-e32c14102412","Type":"ContainerDied","Data":"63149cdb78b61ff8605f380c5ac1f511b919aa21dd5606a1425d16d16f5cff95"} Nov 28 15:44:29 crc kubenswrapper[4884]: I1128 15:44:29.885613 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": read tcp 10.217.0.2:41814->10.217.0.192:8775: read: connection reset by peer" Nov 28 15:44:29 crc kubenswrapper[4884]: I1128 15:44:29.885679 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.192:8775/\": read tcp 10.217.0.2:41830->10.217.0.192:8775: read: connection reset by peer" Nov 28 15:44:30 crc kubenswrapper[4884]: I1128 15:44:30.522348 4884 generic.go:334] "Generic (PLEG): container finished" podID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerID="e3f5e31528ef3c065f96ad92bea080bc5996a8043131d95f1ee95074faa786e5" exitCode=0 Nov 28 15:44:30 crc kubenswrapper[4884]: I1128 15:44:30.522585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6eb2f4f0-9527-4a35-a9c0-e32c14102412","Type":"ContainerDied","Data":"e3f5e31528ef3c065f96ad92bea080bc5996a8043131d95f1ee95074faa786e5"} Nov 28 15:44:30 crc kubenswrapper[4884]: I1128 15:44:30.887960 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.043926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l27tn\" (UniqueName: \"kubernetes.io/projected/6eb2f4f0-9527-4a35-a9c0-e32c14102412-kube-api-access-l27tn\") pod \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.043990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-config-data\") pod \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.044036 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-nova-metadata-tls-certs\") pod \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.044164 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6eb2f4f0-9527-4a35-a9c0-e32c14102412-logs\") pod \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.044188 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-combined-ca-bundle\") pod \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\" (UID: \"6eb2f4f0-9527-4a35-a9c0-e32c14102412\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.045510 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6eb2f4f0-9527-4a35-a9c0-e32c14102412-logs" (OuterVolumeSpecName: "logs") pod "6eb2f4f0-9527-4a35-a9c0-e32c14102412" (UID: "6eb2f4f0-9527-4a35-a9c0-e32c14102412"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.051334 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eb2f4f0-9527-4a35-a9c0-e32c14102412-kube-api-access-l27tn" (OuterVolumeSpecName: "kube-api-access-l27tn") pod "6eb2f4f0-9527-4a35-a9c0-e32c14102412" (UID: "6eb2f4f0-9527-4a35-a9c0-e32c14102412"). InnerVolumeSpecName "kube-api-access-l27tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.086069 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6eb2f4f0-9527-4a35-a9c0-e32c14102412" (UID: "6eb2f4f0-9527-4a35-a9c0-e32c14102412"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.092916 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-config-data" (OuterVolumeSpecName: "config-data") pod "6eb2f4f0-9527-4a35-a9c0-e32c14102412" (UID: "6eb2f4f0-9527-4a35-a9c0-e32c14102412"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.111145 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "6eb2f4f0-9527-4a35-a9c0-e32c14102412" (UID: "6eb2f4f0-9527-4a35-a9c0-e32c14102412"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.146712 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l27tn\" (UniqueName: \"kubernetes.io/projected/6eb2f4f0-9527-4a35-a9c0-e32c14102412-kube-api-access-l27tn\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.146750 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.146769 4884 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.146781 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6eb2f4f0-9527-4a35-a9c0-e32c14102412-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.146792 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb2f4f0-9527-4a35-a9c0-e32c14102412-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.313736 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.455822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-combined-ca-bundle\") pod \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.455939 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-config-data\") pod \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.456319 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmrtn\" (UniqueName: \"kubernetes.io/projected/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-kube-api-access-wmrtn\") pod \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\" (UID: \"8ffbf33c-053c-4b3b-b5ff-93c2391d4170\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.474798 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-kube-api-access-wmrtn" (OuterVolumeSpecName: "kube-api-access-wmrtn") pod "8ffbf33c-053c-4b3b-b5ff-93c2391d4170" (UID: "8ffbf33c-053c-4b3b-b5ff-93c2391d4170"). InnerVolumeSpecName "kube-api-access-wmrtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.490686 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ffbf33c-053c-4b3b-b5ff-93c2391d4170" (UID: "8ffbf33c-053c-4b3b-b5ff-93c2391d4170"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.510700 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-config-data" (OuterVolumeSpecName: "config-data") pod "8ffbf33c-053c-4b3b-b5ff-93c2391d4170" (UID: "8ffbf33c-053c-4b3b-b5ff-93c2391d4170"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.537471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6eb2f4f0-9527-4a35-a9c0-e32c14102412","Type":"ContainerDied","Data":"c41d3346f6e36c5301057c18946867b508984a61e31997103864b18457ec0779"} Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.537490 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.537515 4884 scope.go:117] "RemoveContainer" containerID="e3f5e31528ef3c065f96ad92bea080bc5996a8043131d95f1ee95074faa786e5" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.539883 4884 generic.go:334] "Generic (PLEG): container finished" podID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" exitCode=0 Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.539979 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8ffbf33c-053c-4b3b-b5ff-93c2391d4170","Type":"ContainerDied","Data":"cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10"} Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.540025 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8ffbf33c-053c-4b3b-b5ff-93c2391d4170","Type":"ContainerDied","Data":"e7a44241613e1dddf037308ac1d4ed0de5ef27bd2fb3febbb1e7834558cd94ce"} Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.540075 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.542534 4884 generic.go:334] "Generic (PLEG): container finished" podID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerID="1abf921eaf64d303e283dac0f5fd09c6477854284f0e13530abfecf77429cf82" exitCode=0 Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.542560 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a61d4d21-a654-4b24-9a4e-00d9ed08297e","Type":"ContainerDied","Data":"1abf921eaf64d303e283dac0f5fd09c6477854284f0e13530abfecf77429cf82"} Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.546798 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.558756 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmrtn\" (UniqueName: \"kubernetes.io/projected/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-kube-api-access-wmrtn\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.558809 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.558826 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ffbf33c-053c-4b3b-b5ff-93c2391d4170-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.564208 4884 scope.go:117] "RemoveContainer" containerID="63149cdb78b61ff8605f380c5ac1f511b919aa21dd5606a1425d16d16f5cff95" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.587135 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.594356 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.607949 4884 scope.go:117] "RemoveContainer" containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.620917 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.648782 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649279 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-api" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649299 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-api" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649324 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-log" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649330 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-log" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649342 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c349ee9-213b-41b3-9a78-b6a846ba244a" containerName="nova-manage" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649348 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c349ee9-213b-41b3-9a78-b6a846ba244a" containerName="nova-manage" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649358 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-log" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649364 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-log" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649379 4884 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="9dde288e-b169-42f7-af64-647eef580099" containerName="dnsmasq-dns" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649385 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dde288e-b169-42f7-af64-647eef580099" containerName="dnsmasq-dns" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649397 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" containerName="nova-scheduler-scheduler" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649402 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" containerName="nova-scheduler-scheduler" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649415 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dde288e-b169-42f7-af64-647eef580099" containerName="init" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649422 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dde288e-b169-42f7-af64-647eef580099" containerName="init" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.649433 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-metadata" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649440 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-metadata" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649655 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-log" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649664 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dde288e-b169-42f7-af64-647eef580099" containerName="dnsmasq-dns" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649673 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" containerName="nova-scheduler-scheduler" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649685 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-metadata" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649697 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" containerName="nova-metadata-log" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649706 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" containerName="nova-api-api" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.649719 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c349ee9-213b-41b3-9a78-b6a846ba244a" containerName="nova-manage" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.650440 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.652561 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.658384 4884 scope.go:117] "RemoveContainer" containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" Nov 28 15:44:31 crc kubenswrapper[4884]: E1128 15:44:31.658886 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10\": container with ID starting with cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10 not found: ID does not exist" containerID="cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.658918 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10"} err="failed to get container status \"cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10\": rpc error: code = NotFound desc = could not find container \"cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10\": container with ID starting with cbab64356ffab7de2570444aff81e124fd61c9d0beeead5acf2a7b6731062e10 not found: ID does not exist" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.659529 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvqfj\" (UniqueName: \"kubernetes.io/projected/a61d4d21-a654-4b24-9a4e-00d9ed08297e-kube-api-access-rvqfj\") pod \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.659565 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a61d4d21-a654-4b24-9a4e-00d9ed08297e-logs\") pod \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.659610 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-public-tls-certs\") pod \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.659681 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-config-data\") pod \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.659790 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-internal-tls-certs\") pod \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\" (UID: \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.659822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-combined-ca-bundle\") pod \"a61d4d21-a654-4b24-9a4e-00d9ed08297e\" (UID: 
\"a61d4d21-a654-4b24-9a4e-00d9ed08297e\") " Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.661036 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a61d4d21-a654-4b24-9a4e-00d9ed08297e-logs" (OuterVolumeSpecName: "logs") pod "a61d4d21-a654-4b24-9a4e-00d9ed08297e" (UID: "a61d4d21-a654-4b24-9a4e-00d9ed08297e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.669284 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a61d4d21-a654-4b24-9a4e-00d9ed08297e-kube-api-access-rvqfj" (OuterVolumeSpecName: "kube-api-access-rvqfj") pod "a61d4d21-a654-4b24-9a4e-00d9ed08297e" (UID: "a61d4d21-a654-4b24-9a4e-00d9ed08297e"). InnerVolumeSpecName "kube-api-access-rvqfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.673550 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.686887 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.697705 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.701460 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.703583 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.704145 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.708198 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-config-data" (OuterVolumeSpecName: "config-data") pod "a61d4d21-a654-4b24-9a4e-00d9ed08297e" (UID: "a61d4d21-a654-4b24-9a4e-00d9ed08297e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.708924 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a61d4d21-a654-4b24-9a4e-00d9ed08297e" (UID: "a61d4d21-a654-4b24-9a4e-00d9ed08297e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.740465 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.752168 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a61d4d21-a654-4b24-9a4e-00d9ed08297e" (UID: "a61d4d21-a654-4b24-9a4e-00d9ed08297e"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.757237 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a61d4d21-a654-4b24-9a4e-00d9ed08297e" (UID: "a61d4d21-a654-4b24-9a4e-00d9ed08297e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.762520 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.762832 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-config-data\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.762995 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsfq8\" (UniqueName: \"kubernetes.io/projected/0264205a-7b80-4df8-8d57-3923074f4a59-kube-api-access-fsfq8\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.763128 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.763202 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.763299 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvqfj\" (UniqueName: \"kubernetes.io/projected/a61d4d21-a654-4b24-9a4e-00d9ed08297e-kube-api-access-rvqfj\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.763370 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a61d4d21-a654-4b24-9a4e-00d9ed08297e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.763433 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.763491 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a61d4d21-a654-4b24-9a4e-00d9ed08297e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865120 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsfq8\" (UniqueName: \"kubernetes.io/projected/0264205a-7b80-4df8-8d57-3923074f4a59-kube-api-access-fsfq8\") pod 
\"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865198 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865278 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-config-data\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865370 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfqhd\" (UniqueName: \"kubernetes.io/projected/39e34f52-669e-4086-94ef-a38542dbc6ea-kube-api-access-hfqhd\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865401 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-config-data\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.865515 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e34f52-669e-4086-94ef-a38542dbc6ea-logs\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.874971 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-config-data\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.878636 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.886636 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fsfq8\" (UniqueName: \"kubernetes.io/projected/0264205a-7b80-4df8-8d57-3923074f4a59-kube-api-access-fsfq8\") pod \"nova-scheduler-0\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.967328 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-config-data\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.967385 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfqhd\" (UniqueName: \"kubernetes.io/projected/39e34f52-669e-4086-94ef-a38542dbc6ea-kube-api-access-hfqhd\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.967514 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e34f52-669e-4086-94ef-a38542dbc6ea-logs\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.967564 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.967625 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.968107 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e34f52-669e-4086-94ef-a38542dbc6ea-logs\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.970967 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.972081 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-config-data\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.972216 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 
15:44:31.973444 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:44:31 crc kubenswrapper[4884]: I1128 15:44:31.985798 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfqhd\" (UniqueName: \"kubernetes.io/projected/39e34f52-669e-4086-94ef-a38542dbc6ea-kube-api-access-hfqhd\") pod \"nova-metadata-0\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " pod="openstack/nova-metadata-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.024555 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.441042 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.568681 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0264205a-7b80-4df8-8d57-3923074f4a59","Type":"ContainerStarted","Data":"bdd402fc4cecc3bbf5078c050af471ca791f473998a04a5f22d577081ea0d273"} Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.571758 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a61d4d21-a654-4b24-9a4e-00d9ed08297e","Type":"ContainerDied","Data":"0bf090b43fd1e326c170f9a417d6560fbcf3ba50270fbe95c7e5990598e69352"} Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.571792 4884 scope.go:117] "RemoveContainer" containerID="1abf921eaf64d303e283dac0f5fd09c6477854284f0e13530abfecf77429cf82" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.571924 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.579015 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:44:32 crc kubenswrapper[4884]: W1128 15:44:32.579546 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39e34f52_669e_4086_94ef_a38542dbc6ea.slice/crio-2f3a42a3d315efeeba3b0212b8f21e571b010b1cc5c084c7bfad5c095354006e WatchSource:0}: Error finding container 2f3a42a3d315efeeba3b0212b8f21e571b010b1cc5c084c7bfad5c095354006e: Status 404 returned error can't find the container with id 2f3a42a3d315efeeba3b0212b8f21e571b010b1cc5c084c7bfad5c095354006e Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.621980 4884 scope.go:117] "RemoveContainer" containerID="9522c0e7e07853449867a248b47d2b1ba02a40bb318fbe036c5ef6afb8a941c9" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.669412 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.710860 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eb2f4f0-9527-4a35-a9c0-e32c14102412" path="/var/lib/kubelet/pods/6eb2f4f0-9527-4a35-a9c0-e32c14102412/volumes" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.711583 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ffbf33c-053c-4b3b-b5ff-93c2391d4170" path="/var/lib/kubelet/pods/8ffbf33c-053c-4b3b-b5ff-93c2391d4170/volumes" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.712201 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.712239 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-api-0"] Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.714444 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.714545 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.718533 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.718541 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.718772 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.798404 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.798458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-config-data\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.798680 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb0b621b-d100-4ec9-b815-13e67489a2ac-logs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.798847 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.798900 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnxwl\" (UniqueName: \"kubernetes.io/projected/cb0b621b-d100-4ec9-b815-13e67489a2ac-kube-api-access-qnxwl\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.799060 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-public-tls-certs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.901073 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.901151 4884 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-config-data\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.901233 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb0b621b-d100-4ec9-b815-13e67489a2ac-logs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.901269 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.901287 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnxwl\" (UniqueName: \"kubernetes.io/projected/cb0b621b-d100-4ec9-b815-13e67489a2ac-kube-api-access-qnxwl\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.901313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-public-tls-certs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.904251 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb0b621b-d100-4ec9-b815-13e67489a2ac-logs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.904314 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-public-tls-certs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.904784 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-config-data\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.904969 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-internal-tls-certs\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.911114 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:32 crc kubenswrapper[4884]: I1128 15:44:32.927969 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnxwl\" (UniqueName: 
\"kubernetes.io/projected/cb0b621b-d100-4ec9-b815-13e67489a2ac-kube-api-access-qnxwl\") pod \"nova-api-0\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " pod="openstack/nova-api-0" Nov 28 15:44:33 crc kubenswrapper[4884]: I1128 15:44:33.050107 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.543527 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.595059 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"39e34f52-669e-4086-94ef-a38542dbc6ea","Type":"ContainerStarted","Data":"8807da65f797b04894447dbc2417ad0e7fe1821650be3b9a2879db50840e4859"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.595119 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"39e34f52-669e-4086-94ef-a38542dbc6ea","Type":"ContainerStarted","Data":"0dc318d6697c8c4b07f99c224c62ddcb0dac46ebed54243f1880cec3c2d5fd1c"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.595138 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"39e34f52-669e-4086-94ef-a38542dbc6ea","Type":"ContainerStarted","Data":"2f3a42a3d315efeeba3b0212b8f21e571b010b1cc5c084c7bfad5c095354006e"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.607163 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0264205a-7b80-4df8-8d57-3923074f4a59","Type":"ContainerStarted","Data":"b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.613808 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cb0b621b-d100-4ec9-b815-13e67489a2ac","Type":"ContainerStarted","Data":"0293b7c161d530b750a3bd535b699eae0eb10652557e8f4a2376f96f300c168f"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.639227 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.639207392 podStartE2EDuration="2.639207392s" podCreationTimestamp="2025-11-28 15:44:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:33.634024975 +0000 UTC m=+1513.196808786" watchObservedRunningTime="2025-11-28 15:44:33.639207392 +0000 UTC m=+1513.201991193" Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.662705 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.662688898 podStartE2EDuration="2.662688898s" podCreationTimestamp="2025-11-28 15:44:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:33.651838242 +0000 UTC m=+1513.214622043" watchObservedRunningTime="2025-11-28 15:44:33.662688898 +0000 UTC m=+1513.225472699" Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.821136 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:33.871796 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 
15:44:34.071288 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-66vj2"] Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:34.640960 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cb0b621b-d100-4ec9-b815-13e67489a2ac","Type":"ContainerStarted","Data":"5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:34.641578 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cb0b621b-d100-4ec9-b815-13e67489a2ac","Type":"ContainerStarted","Data":"fcaf2c66276780f82ef88c966fdd309cd66bab9591912e6c0898a544d1dd5113"} Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:34.684475 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.684448119 podStartE2EDuration="2.684448119s" podCreationTimestamp="2025-11-28 15:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:44:34.665786171 +0000 UTC m=+1514.228569962" watchObservedRunningTime="2025-11-28 15:44:34.684448119 +0000 UTC m=+1514.247231960" Nov 28 15:44:34 crc kubenswrapper[4884]: I1128 15:44:34.703589 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a61d4d21-a654-4b24-9a4e-00d9ed08297e" path="/var/lib/kubelet/pods/a61d4d21-a654-4b24-9a4e-00d9ed08297e/volumes" Nov 28 15:44:35 crc kubenswrapper[4884]: I1128 15:44:35.656116 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-66vj2" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="registry-server" containerID="cri-o://8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d" gracePeriod=2 Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.301298 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.419133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgj55\" (UniqueName: \"kubernetes.io/projected/a5fcd4e5-5661-4910-bb7d-633c6a08882c-kube-api-access-jgj55\") pod \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.419239 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-utilities\") pod \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.419315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-catalog-content\") pod \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\" (UID: \"a5fcd4e5-5661-4910-bb7d-633c6a08882c\") " Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.420674 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-utilities" (OuterVolumeSpecName: "utilities") pod "a5fcd4e5-5661-4910-bb7d-633c6a08882c" (UID: "a5fcd4e5-5661-4910-bb7d-633c6a08882c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.426573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5fcd4e5-5661-4910-bb7d-633c6a08882c-kube-api-access-jgj55" (OuterVolumeSpecName: "kube-api-access-jgj55") pod "a5fcd4e5-5661-4910-bb7d-633c6a08882c" (UID: "a5fcd4e5-5661-4910-bb7d-633c6a08882c"). InnerVolumeSpecName "kube-api-access-jgj55". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.521452 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgj55\" (UniqueName: \"kubernetes.io/projected/a5fcd4e5-5661-4910-bb7d-633c6a08882c-kube-api-access-jgj55\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.521497 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.530983 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5fcd4e5-5661-4910-bb7d-633c6a08882c" (UID: "a5fcd4e5-5661-4910-bb7d-633c6a08882c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.623243 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5fcd4e5-5661-4910-bb7d-633c6a08882c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.672414 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerID="8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d" exitCode=0 Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.672453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerDied","Data":"8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d"} Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.672718 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-66vj2" event={"ID":"a5fcd4e5-5661-4910-bb7d-633c6a08882c","Type":"ContainerDied","Data":"0a1ae262d1747d5e07fa4d3465913515029152fb749b1f9702de775bd646bb13"} Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.672542 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-66vj2" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.672740 4884 scope.go:117] "RemoveContainer" containerID="8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.706961 4884 scope.go:117] "RemoveContainer" containerID="a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.731183 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-66vj2"] Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.738910 4884 scope.go:117] "RemoveContainer" containerID="2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.740531 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-66vj2"] Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.775301 4884 scope.go:117] "RemoveContainer" containerID="8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d" Nov 28 15:44:36 crc kubenswrapper[4884]: E1128 15:44:36.775852 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d\": container with ID starting with 8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d not found: ID does not exist" containerID="8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.775881 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d"} err="failed to get container status \"8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d\": rpc error: code = NotFound desc = could not find container \"8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d\": container with ID starting with 8c050fc85deba8e59553f2c293ac4ac1c5d16811269794d2bd166e62f17b5a8d not found: ID does not exist" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.775903 4884 scope.go:117] "RemoveContainer" containerID="a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a" Nov 28 15:44:36 crc kubenswrapper[4884]: E1128 15:44:36.776271 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a\": container with ID starting with a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a not found: ID does not exist" containerID="a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.776296 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a"} err="failed to get container status \"a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a\": rpc error: code = NotFound desc = could not find container \"a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a\": container with ID starting with a8ec9f232af47328f0db4b2f179031d8d5c6b29765a4dc7f5f17cb4ac417888a not found: ID does not exist" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.776311 4884 scope.go:117] "RemoveContainer" 
containerID="2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2" Nov 28 15:44:36 crc kubenswrapper[4884]: E1128 15:44:36.776602 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2\": container with ID starting with 2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2 not found: ID does not exist" containerID="2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.776618 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2"} err="failed to get container status \"2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2\": rpc error: code = NotFound desc = could not find container \"2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2\": container with ID starting with 2a407c23fa1e73d1f7285ea5a9cf3056dcfdb13d522fbff7fe3618b7364d7ed2 not found: ID does not exist" Nov 28 15:44:36 crc kubenswrapper[4884]: I1128 15:44:36.973830 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:44:37 crc kubenswrapper[4884]: I1128 15:44:37.026003 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:44:37 crc kubenswrapper[4884]: I1128 15:44:37.026042 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:44:38 crc kubenswrapper[4884]: I1128 15:44:38.702810 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" path="/var/lib/kubelet/pods/a5fcd4e5-5661-4910-bb7d-633c6a08882c/volumes" Nov 28 15:44:41 crc kubenswrapper[4884]: I1128 15:44:41.973983 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:44:42 crc kubenswrapper[4884]: I1128 15:44:42.021631 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:44:42 crc kubenswrapper[4884]: I1128 15:44:42.026191 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:44:42 crc kubenswrapper[4884]: I1128 15:44:42.026250 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:44:42 crc kubenswrapper[4884]: I1128 15:44:42.777032 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:44:43 crc kubenswrapper[4884]: I1128 15:44:43.011585 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 15:44:43 crc kubenswrapper[4884]: I1128 15:44:43.038335 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:44:43 crc kubenswrapper[4884]: I1128 15:44:43.038502 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-log" probeResult="failure" output="Get 
\"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:44:43 crc kubenswrapper[4884]: I1128 15:44:43.052683 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:44:43 crc kubenswrapper[4884]: I1128 15:44:43.053024 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:44:44 crc kubenswrapper[4884]: I1128 15:44:44.086430 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:44:44 crc kubenswrapper[4884]: I1128 15:44:44.086475 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 15:44:51 crc kubenswrapper[4884]: I1128 15:44:51.243343 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:44:51 crc kubenswrapper[4884]: I1128 15:44:51.244010 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:44:52 crc kubenswrapper[4884]: I1128 15:44:52.034617 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:44:52 crc kubenswrapper[4884]: I1128 15:44:52.036438 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:44:52 crc kubenswrapper[4884]: I1128 15:44:52.043687 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:44:52 crc kubenswrapper[4884]: I1128 15:44:52.867767 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:44:53 crc kubenswrapper[4884]: I1128 15:44:53.074523 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:44:53 crc kubenswrapper[4884]: I1128 15:44:53.076285 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:44:53 crc kubenswrapper[4884]: I1128 15:44:53.076814 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:44:53 crc kubenswrapper[4884]: I1128 15:44:53.081890 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:44:53 crc kubenswrapper[4884]: I1128 15:44:53.869489 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:44:53 crc kubenswrapper[4884]: I1128 15:44:53.878948 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-api-0" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.167985 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw"] Nov 28 15:45:00 crc kubenswrapper[4884]: E1128 15:45:00.169121 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="extract-content" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.169139 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="extract-content" Nov 28 15:45:00 crc kubenswrapper[4884]: E1128 15:45:00.169154 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="extract-utilities" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.169161 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="extract-utilities" Nov 28 15:45:00 crc kubenswrapper[4884]: E1128 15:45:00.169180 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="registry-server" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.169187 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="registry-server" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.169406 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5fcd4e5-5661-4910-bb7d-633c6a08882c" containerName="registry-server" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.170040 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.171949 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.172570 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.184434 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw"] Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.215786 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-secret-volume\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.216134 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-config-volume\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.216308 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fljr\" (UniqueName: 
\"kubernetes.io/projected/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-kube-api-access-6fljr\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.317423 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-secret-volume\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.317468 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-config-volume\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.317498 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fljr\" (UniqueName: \"kubernetes.io/projected/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-kube-api-access-6fljr\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.318420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-config-volume\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.324875 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-secret-volume\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.335022 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fljr\" (UniqueName: \"kubernetes.io/projected/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-kube-api-access-6fljr\") pod \"collect-profiles-29405745-ltflw\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:00 crc kubenswrapper[4884]: I1128 15:45:00.494021 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:01 crc kubenswrapper[4884]: I1128 15:45:01.022306 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw"] Nov 28 15:45:01 crc kubenswrapper[4884]: I1128 15:45:01.967058 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e6dd427-4110-4ec9-89b0-d94002c9a7f4" containerID="358d90be41536924816d57726ed5b634f19d2f1d18317aac8fb9bc70d889c19f" exitCode=0 Nov 28 15:45:01 crc kubenswrapper[4884]: I1128 15:45:01.967159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" event={"ID":"0e6dd427-4110-4ec9-89b0-d94002c9a7f4","Type":"ContainerDied","Data":"358d90be41536924816d57726ed5b634f19d2f1d18317aac8fb9bc70d889c19f"} Nov 28 15:45:01 crc kubenswrapper[4884]: I1128 15:45:01.967362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" event={"ID":"0e6dd427-4110-4ec9-89b0-d94002c9a7f4","Type":"ContainerStarted","Data":"e27e5c03142b09dd8e6be981f1c9ab6172e6b8a39df4a55c024c8911806f34d9"} Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.384371 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.484581 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fljr\" (UniqueName: \"kubernetes.io/projected/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-kube-api-access-6fljr\") pod \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.484667 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-secret-volume\") pod \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.484691 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-config-volume\") pod \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\" (UID: \"0e6dd427-4110-4ec9-89b0-d94002c9a7f4\") " Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.485364 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-config-volume" (OuterVolumeSpecName: "config-volume") pod "0e6dd427-4110-4ec9-89b0-d94002c9a7f4" (UID: "0e6dd427-4110-4ec9-89b0-d94002c9a7f4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.490132 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-kube-api-access-6fljr" (OuterVolumeSpecName: "kube-api-access-6fljr") pod "0e6dd427-4110-4ec9-89b0-d94002c9a7f4" (UID: "0e6dd427-4110-4ec9-89b0-d94002c9a7f4"). InnerVolumeSpecName "kube-api-access-6fljr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.491817 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0e6dd427-4110-4ec9-89b0-d94002c9a7f4" (UID: "0e6dd427-4110-4ec9-89b0-d94002c9a7f4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.586349 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.586384 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.586394 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fljr\" (UniqueName: \"kubernetes.io/projected/0e6dd427-4110-4ec9-89b0-d94002c9a7f4-kube-api-access-6fljr\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.995803 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" event={"ID":"0e6dd427-4110-4ec9-89b0-d94002c9a7f4","Type":"ContainerDied","Data":"e27e5c03142b09dd8e6be981f1c9ab6172e6b8a39df4a55c024c8911806f34d9"} Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.996059 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e27e5c03142b09dd8e6be981f1c9ab6172e6b8a39df4a55c024c8911806f34d9" Nov 28 15:45:03 crc kubenswrapper[4884]: I1128 15:45:03.995916 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.296817 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.297516 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" containerName="openstackclient" containerID="cri-o://c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6" gracePeriod=2 Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.325053 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.399142 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementeaff-account-delete-tgs7q"] Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.399907 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" containerName="openstackclient" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.399927 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" containerName="openstackclient" Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.399945 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e6dd427-4110-4ec9-89b0-d94002c9a7f4" containerName="collect-profiles" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.399952 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e6dd427-4110-4ec9-89b0-d94002c9a7f4" containerName="collect-profiles" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.400146 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e6dd427-4110-4ec9-89b0-d94002c9a7f4" containerName="collect-profiles" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.400165 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" containerName="openstackclient" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.400778 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.424416 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementeaff-account-delete-tgs7q"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.473603 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.480783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flds4\" (UniqueName: \"kubernetes.io/projected/7bc5134b-7bda-47e0-86a3-b4f374e842e6-kube-api-access-flds4\") pod \"placementeaff-account-delete-tgs7q\" (UID: \"7bc5134b-7bda-47e0-86a3-b4f374e842e6\") " pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.505516 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.506141 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="openstack-network-exporter" containerID="cri-o://3b941bb6a3a7e306dfa8d1b96625c36d59ceacd6279193eca68547bae5466ce0" gracePeriod=300 Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.579534 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-sfxfp"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.583751 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flds4\" (UniqueName: \"kubernetes.io/projected/7bc5134b-7bda-47e0-86a3-b4f374e842e6-kube-api-access-flds4\") pod \"placementeaff-account-delete-tgs7q\" (UID: \"7bc5134b-7bda-47e0-86a3-b4f374e842e6\") " pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.583945 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.583998 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data podName:454fa1ac-19ca-4c44-b0fb-2c30039524a7 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:13.083979751 +0000 UTC m=+1552.646763542 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data") pod "rabbitmq-cell1-server-0" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7") : configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.595139 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-sfxfp"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.600732 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="ovsdbserver-nb" containerID="cri-o://ae49153625b5a710cac5ba1fd2d4e6f0fc8441454ec160836b76057c9cc2c22e" gracePeriod=300 Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.604808 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.605725 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="openstack-network-exporter" containerID="cri-o://a5c169886253947fb2ce50aaadfd10698791496c5feb2c5fc8da5851f6053e65" gracePeriod=300 Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.641526 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flds4\" (UniqueName: \"kubernetes.io/projected/7bc5134b-7bda-47e0-86a3-b4f374e842e6-kube-api-access-flds4\") pod \"placementeaff-account-delete-tgs7q\" (UID: \"7bc5134b-7bda-47e0-86a3-b4f374e842e6\") " pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.738601 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.820915 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="ovsdbserver-sb" containerID="cri-o://2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb" gracePeriod=300 Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.863559 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd1c087f-48cf-4b19-b8a4-ca7677f72f3c" path="/var/lib/kubelet/pods/bd1c087f-48cf-4b19-b8a4-ca7677f72f3c/volumes" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.901901 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder0078-account-delete-nnnfh"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.902991 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder0078-account-delete-nnnfh"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.903017 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican783f-account-delete-x9hl4"] Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.872444 4884 log.go:32] "ExecSync cmd from runtime service failed" err=< Nov 28 15:45:12 crc kubenswrapper[4884]: rpc error: code = Unknown desc = command error: setns `mnt`: Bad file descriptor Nov 28 15:45:12 crc kubenswrapper[4884]: fail startup Nov 28 15:45:12 crc kubenswrapper[4884]: , stdout: , stderr: , exit code -1 Nov 28 15:45:12 crc kubenswrapper[4884]: > containerID="2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.903283 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.903900 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.903889 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb is running failed: container process not found" containerID="2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.904293 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb is running failed: container process not found" containerID="2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 15:45:12 crc kubenswrapper[4884]: E1128 15:45:12.904336 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-sb-0" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="ovsdbserver-sb" Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.928002 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican783f-account-delete-x9hl4"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.965831 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.966054 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="ovn-northd" containerID="cri-o://c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c" gracePeriod=30 Nov 28 15:45:12 crc kubenswrapper[4884]: I1128 15:45:12.966521 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="openstack-network-exporter" containerID="cri-o://4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e" gracePeriod=30 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.016695 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-bvnxp"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.018925 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7kcv\" (UniqueName: \"kubernetes.io/projected/38ca29b2-4263-4cb5-ba00-fe95430cf7f6-kube-api-access-v7kcv\") pod \"barbican783f-account-delete-x9hl4\" (UID: \"38ca29b2-4263-4cb5-ba00-fe95430cf7f6\") " pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.019020 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9xqs\" (UniqueName: \"kubernetes.io/projected/f633ca3e-03a6-4c24-9783-94fb61ed0ade-kube-api-access-k9xqs\") pod \"cinder0078-account-delete-nnnfh\" (UID: \"f633ca3e-03a6-4c24-9783-94fb61ed0ade\") " pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.032840 4884 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/cinder-db-sync-bvnxp"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.054316 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-r4pfq"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.074577 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-r4pfq"] Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.100304 4884 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.189:44792->38.102.83.189:45559: write tcp 38.102.83.189:44792->38.102.83.189:45559: write: broken pipe Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.141223 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7kcv\" (UniqueName: \"kubernetes.io/projected/38ca29b2-4263-4cb5-ba00-fe95430cf7f6-kube-api-access-v7kcv\") pod \"barbican783f-account-delete-x9hl4\" (UID: \"38ca29b2-4263-4cb5-ba00-fe95430cf7f6\") " pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.141285 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9xqs\" (UniqueName: \"kubernetes.io/projected/f633ca3e-03a6-4c24-9783-94fb61ed0ade-kube-api-access-k9xqs\") pod \"cinder0078-account-delete-nnnfh\" (UID: \"f633ca3e-03a6-4c24-9783-94fb61ed0ade\") " pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.141649 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.141688 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data podName:454fa1ac-19ca-4c44-b0fb-2c30039524a7 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:14.141674528 +0000 UTC m=+1553.704458329 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data") pod "rabbitmq-cell1-server-0" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7") : configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.161076 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4dba6a6f-821c-4897-b88d-5cca9482f4fa/ovsdbserver-sb/0.log" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.161130 4884 generic.go:334] "Generic (PLEG): container finished" podID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerID="a5c169886253947fb2ce50aaadfd10698791496c5feb2c5fc8da5851f6053e65" exitCode=2 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.161144 4884 generic.go:334] "Generic (PLEG): container finished" podID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerID="2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb" exitCode=143 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.161181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dba6a6f-821c-4897-b88d-5cca9482f4fa","Type":"ContainerDied","Data":"a5c169886253947fb2ce50aaadfd10698791496c5feb2c5fc8da5851f6053e65"} Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.161207 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dba6a6f-821c-4897-b88d-5cca9482f4fa","Type":"ContainerDied","Data":"2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb"} Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.176164 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.208295 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f/ovsdbserver-nb/0.log" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.209404 4884 generic.go:334] "Generic (PLEG): container finished" podID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerID="3b941bb6a3a7e306dfa8d1b96625c36d59ceacd6279193eca68547bae5466ce0" exitCode=2 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.209490 4884 generic.go:334] "Generic (PLEG): container finished" podID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerID="ae49153625b5a710cac5ba1fd2d4e6f0fc8441454ec160836b76057c9cc2c22e" exitCode=143 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.209553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f","Type":"ContainerDied","Data":"3b941bb6a3a7e306dfa8d1b96625c36d59ceacd6279193eca68547bae5466ce0"} Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.209620 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f","Type":"ContainerDied","Data":"ae49153625b5a710cac5ba1fd2d4e6f0fc8441454ec160836b76057c9cc2c22e"} Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.210746 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9xqs\" (UniqueName: \"kubernetes.io/projected/f633ca3e-03a6-4c24-9783-94fb61ed0ade-kube-api-access-k9xqs\") pod \"cinder0078-account-delete-nnnfh\" (UID: \"f633ca3e-03a6-4c24-9783-94fb61ed0ade\") " pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.211223 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7kcv\" (UniqueName: \"kubernetes.io/projected/38ca29b2-4263-4cb5-ba00-fe95430cf7f6-kube-api-access-v7kcv\") pod \"barbican783f-account-delete-x9hl4\" (UID: \"38ca29b2-4263-4cb5-ba00-fe95430cf7f6\") " pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.229343 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-m6njq"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.279216 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-m6njq"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.312207 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7789c97d46-jmbnq"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.312727 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-7789c97d46-jmbnq" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-log" containerID="cri-o://4d34e7ecd7272753eb9a76334dacd796a0250216a60d30bc46bdd65b7be43497" gracePeriod=30 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.313747 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-7789c97d46-jmbnq" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-api" containerID="cri-o://70ad5e352be838cd9b823be6fa572eb0d45de5ed204f4a0a7f67e0afe112af20" gracePeriod=30 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.323347 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0992b-account-delete-sfgzb"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.324716 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.345627 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.345863 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data podName:3a5d81bd-3b99-4aa6-82dc-2969295dce39 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:13.845847422 +0000 UTC m=+1553.408631223 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data") pod "rabbitmq-server-0" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39") : configmap "rabbitmq-config-data" not found Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.368546 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0992b-account-delete-sfgzb"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.447354 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.449565 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn4rk\" (UniqueName: \"kubernetes.io/projected/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd-kube-api-access-rn4rk\") pod \"novacell0992b-account-delete-sfgzb\" (UID: \"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd\") " pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.481354 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.524218 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell1c1f2-account-delete-lnkf6"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.525713 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.533009 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-t66fw"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.544716 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-t66fw"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.554624 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn4rk\" (UniqueName: \"kubernetes.io/projected/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd-kube-api-access-rn4rk\") pod \"novacell0992b-account-delete-sfgzb\" (UID: \"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd\") " pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.566314 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell1c1f2-account-delete-lnkf6"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.580681 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn4rk\" (UniqueName: \"kubernetes.io/projected/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd-kube-api-access-rn4rk\") pod \"novacell0992b-account-delete-sfgzb\" (UID: \"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd\") " pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.593044 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-4mp84"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.599482 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" podUID="006d76b7-d405-4056-a55b-f01661cde456" containerName="dnsmasq-dns" containerID="cri-o://8bd58f8dfb10dddf860b7726450d9b6180488f6c7bec696c42d57ea2a88b8b6e" gracePeriod=10 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.618757 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-vm8q9"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.644531 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tnsft"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.658494 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvg4g\" (UniqueName: \"kubernetes.io/projected/90481f2a-55d7-459f-9e46-2ca816951a8d-kube-api-access-nvg4g\") pod \"novacell1c1f2-account-delete-lnkf6\" (UID: 
\"90481f2a-55d7-459f-9e46-2ca816951a8d\") " pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.676212 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-sndnk"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.676457 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-sndnk" podUID="69cf456a-4993-4bd5-b745-5d73a65b6b91" containerName="openstack-network-exporter" containerID="cri-o://59f590909455eb43b2d839c335076a9aae027b6d3bbbbda17ffef2e52abe1e46" gracePeriod=30 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.692329 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-z89zk"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.693141 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.702171 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-dn2ww"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.734394 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-z89zk"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.766727 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-dn2ww"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.778001 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvg4g\" (UniqueName: \"kubernetes.io/projected/90481f2a-55d7-459f-9e46-2ca816951a8d-kube-api-access-nvg4g\") pod \"novacell1c1f2-account-delete-lnkf6\" (UID: \"90481f2a-55d7-459f-9e46-2ca816951a8d\") " pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.794924 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-eaff-account-create-pqqn6"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.815127 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-eaff-account-create-pqqn6"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.831218 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tnsft" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" probeResult="failure" output=< Nov 28 15:45:13 crc kubenswrapper[4884]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 15:45:13 crc kubenswrapper[4884]: > Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.873905 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvg4g\" (UniqueName: \"kubernetes.io/projected/90481f2a-55d7-459f-9e46-2ca816951a8d-kube-api-access-nvg4g\") pod \"novacell1c1f2-account-delete-lnkf6\" (UID: \"90481f2a-55d7-459f-9e46-2ca816951a8d\") " pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.886039 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 15:45:13 crc kubenswrapper[4884]: E1128 15:45:13.886236 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data podName:3a5d81bd-3b99-4aa6-82dc-2969295dce39 nodeName:}" failed. 
No retries permitted until 2025-11-28 15:45:14.886213525 +0000 UTC m=+1554.448997326 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data") pod "rabbitmq-server-0" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39") : configmap "rabbitmq-config-data" not found Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.909991 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementeaff-account-delete-tgs7q"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.983934 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.984194 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="cinder-scheduler" containerID="cri-o://46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d" gracePeriod=30 Nov 28 15:45:13 crc kubenswrapper[4884]: I1128 15:45:13.984589 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="probe" containerID="cri-o://3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.037058 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.037670 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api-log" containerID="cri-o://754a22031c7e79239a68f9b96363ce0d184e5432cae3b7213429811a1a369d36" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.037806 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api" containerID="cri-o://3abb3a7245fdef87ef691fa6295e4f7c18af329b641de8d6a74c5a455959669f" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.077348 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.078348 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-expirer" containerID="cri-o://bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.079005 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="rsync" containerID="cri-o://a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.082020 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-updater" containerID="cri-o://742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085331 4884 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-server" containerID="cri-o://a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085373 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-auditor" containerID="cri-o://b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085391 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-replicator" containerID="cri-o://e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.086622 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="swift-recon-cron" containerID="cri-o://f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085407 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-server" containerID="cri-o://6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085420 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-updater" containerID="cri-o://abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085431 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-auditor" containerID="cri-o://70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085445 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-replicator" containerID="cri-o://644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085463 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-server" containerID="cri-o://88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085505 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-reaper" containerID="cri-o://6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085520 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" 
podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-auditor" containerID="cri-o://e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.085534 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-replicator" containerID="cri-o://0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.100763 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5j947"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.105741 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.108530 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f/ovsdbserver-nb/0.log" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.108621 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.127225 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5j947"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.161752 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.162015 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-log" containerID="cri-o://deec2653ca05ea2b2431e7f8f5e25dfcc18dcdf5ee831af2554036dbfce9676e" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.162107 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-httpd" containerID="cri-o://1dba7e621dc762e39efb79478b3479dc3c3f7b54537bead34f7ee2f18daf1ebc" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.162849 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.191827 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-chfqr"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195353 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdbserver-nb-tls-certs\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195384 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-metrics-certs-tls-certs\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195424 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-259pm\" (UniqueName: \"kubernetes.io/projected/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-kube-api-access-259pm\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195452 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-config\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195481 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-scripts\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195637 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdb-rundir\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.195697 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-combined-ca-bundle\") pod \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\" (UID: \"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.196042 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-utilities\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.196079 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-catalog-content\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.196174 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffpzn\" (UniqueName: \"kubernetes.io/projected/ebf7deee-37f0-4b47-8e10-3027e7009916-kube-api-access-ffpzn\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.196686 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-config" (OuterVolumeSpecName: "config") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.196759 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-scripts" (OuterVolumeSpecName: "scripts") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.198550 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_6cdac902-b0e5-4f41-923c-07241207d730/ovn-northd/0.log" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.198677 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.201322 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: E1128 15:45:14.201592 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:14 crc kubenswrapper[4884]: E1128 15:45:14.201653 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data podName:454fa1ac-19ca-4c44-b0fb-2c30039524a7 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:16.201631235 +0000 UTC m=+1555.764415036 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data") pod "rabbitmq-cell1-server-0" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7") : configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.203465 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.221034 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-chfqr"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.273314 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-dtn4c"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.285259 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-kube-api-access-259pm" (OuterVolumeSpecName: "kube-api-access-259pm") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "kube-api-access-259pm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.309673 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6cdac902-b0e5-4f41-923c-07241207d730-ovn-rundir\") pod \"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.309713 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-config\") pod \"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.309743 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7x96\" (UniqueName: \"kubernetes.io/projected/6cdac902-b0e5-4f41-923c-07241207d730-kube-api-access-v7x96\") pod \"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.309878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-combined-ca-bundle\") pod \"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.309909 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-ovn-northd-tls-certs\") pod \"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.309962 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-scripts\") pod 
\"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.310112 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-metrics-certs-tls-certs\") pod \"6cdac902-b0e5-4f41-923c-07241207d730\" (UID: \"6cdac902-b0e5-4f41-923c-07241207d730\") " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.317201 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffpzn\" (UniqueName: \"kubernetes.io/projected/ebf7deee-37f0-4b47-8e10-3027e7009916-kube-api-access-ffpzn\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.317477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-utilities\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.321645 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cdac902-b0e5-4f41-923c-07241207d730-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323222 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-catalog-content\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323464 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323479 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323490 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6cdac902-b0e5-4f41-923c-07241207d730-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323505 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-259pm\" (UniqueName: \"kubernetes.io/projected/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-kube-api-access-259pm\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323523 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.323533 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.324645 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-utilities\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.324868 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-catalog-content\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.325211 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-config" (OuterVolumeSpecName: "config") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.324149 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-scripts" (OuterVolumeSpecName: "scripts") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.340917 4884 generic.go:334] "Generic (PLEG): container finished" podID="006d76b7-d405-4056-a55b-f01661cde456" containerID="8bd58f8dfb10dddf860b7726450d9b6180488f6c7bec696c42d57ea2a88b8b6e" exitCode=0 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.341004 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" event={"ID":"006d76b7-d405-4056-a55b-f01661cde456","Type":"ContainerDied","Data":"8bd58f8dfb10dddf860b7726450d9b6180488f6c7bec696c42d57ea2a88b8b6e"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.351647 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-dtn4c"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.352398 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cdac902-b0e5-4f41-923c-07241207d730-kube-api-access-v7x96" (OuterVolumeSpecName: "kube-api-access-v7x96") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "kube-api-access-v7x96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.375763 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386540 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_6cdac902-b0e5-4f41-923c-07241207d730/ovn-northd/0.log" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386591 4884 generic.go:334] "Generic (PLEG): container finished" podID="6cdac902-b0e5-4f41-923c-07241207d730" containerID="4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e" exitCode=2 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386614 4884 generic.go:334] "Generic (PLEG): container finished" podID="6cdac902-b0e5-4f41-923c-07241207d730" containerID="c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c" exitCode=143 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386696 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6cdac902-b0e5-4f41-923c-07241207d730","Type":"ContainerDied","Data":"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386728 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6cdac902-b0e5-4f41-923c-07241207d730","Type":"ContainerDied","Data":"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386742 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6cdac902-b0e5-4f41-923c-07241207d730","Type":"ContainerDied","Data":"0894a630a352c74e7387b4a09b7a1ede361348647619265695937aa06d289b29"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386760 4884 scope.go:117] "RemoveContainer" containerID="4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.386932 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.409654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffpzn\" (UniqueName: \"kubernetes.io/projected/ebf7deee-37f0-4b47-8e10-3027e7009916-kube-api-access-ffpzn\") pod \"community-operators-5j947\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") " pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.414461 4884 generic.go:334] "Generic (PLEG): container finished" podID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerID="754a22031c7e79239a68f9b96363ce0d184e5432cae3b7213429811a1a369d36" exitCode=143 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.414555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5c94e18c-15c4-4ef6-929f-c1941dbd3919","Type":"ContainerDied","Data":"754a22031c7e79239a68f9b96363ce0d184e5432cae3b7213429811a1a369d36"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.425238 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.425270 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.425286 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7x96\" (UniqueName: \"kubernetes.io/projected/6cdac902-b0e5-4f41-923c-07241207d730-kube-api-access-v7x96\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.425298 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cdac902-b0e5-4f41-923c-07241207d730-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.431728 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.431990 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-log" containerID="cri-o://ce2d368f9ec98e7e160e340cbf52ff57688a8c528c07d144e558a64b3ca00a72" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.432078 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-httpd" containerID="cri-o://ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.435585 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.445994 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f/ovsdbserver-nb/0.log" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.446319 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.452192 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f","Type":"ContainerDied","Data":"8bf1c17a1dcac57300b9682afa060d2b53f593ebbbbad04d7a0960a3ff2b3121"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.471975 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.474722 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.475030 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sndnk_69cf456a-4993-4bd5-b745-5d73a65b6b91/openstack-network-exporter/0.log" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.475075 4884 generic.go:334] "Generic (PLEG): container finished" podID="69cf456a-4993-4bd5-b745-5d73a65b6b91" containerID="59f590909455eb43b2d839c335076a9aae027b6d3bbbbda17ffef2e52abe1e46" exitCode=2 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.475196 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sndnk" event={"ID":"69cf456a-4993-4bd5-b745-5d73a65b6b91","Type":"ContainerDied","Data":"59f590909455eb43b2d839c335076a9aae027b6d3bbbbda17ffef2e52abe1e46"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.502207 4884 generic.go:334] "Generic (PLEG): container finished" podID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerID="4d34e7ecd7272753eb9a76334dacd796a0250216a60d30bc46bdd65b7be43497" exitCode=143 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.502255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7789c97d46-jmbnq" event={"ID":"6237eb73-294e-4e4b-a619-e669061a1b5b","Type":"ContainerDied","Data":"4d34e7ecd7272753eb9a76334dacd796a0250216a60d30bc46bdd65b7be43497"} Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.530077 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.530126 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.530137 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.558026 4884 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/neutron-67dd7bd66f-2ff2l"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.559256 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-67dd7bd66f-2ff2l" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-api" containerID="cri-o://f5481c335b4d3298f0e98cb830ee1e1cade042a57387db9cfad9455b405cd581" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.559820 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-67dd7bd66f-2ff2l" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-httpd" containerID="cri-o://6058d7f72df016355acac3c48d8e08bd8294db35efcb91a326adb1708153048c" gracePeriod=30 Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.651892 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-ml6x7"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.658225 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" (UID: "c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.658235 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.726347 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "6cdac902-b0e5-4f41-923c-07241207d730" (UID: "6cdac902-b0e5-4f41-923c-07241207d730"). InnerVolumeSpecName "ovn-northd-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.734044 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.734068 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.734077 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cdac902-b0e5-4f41-923c-07241207d730-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.881858 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1" path="/var/lib/kubelet/pods/0a7fbcad-e6b7-4ed0-b031-e97d35b85fb1/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.882696 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c349ee9-213b-41b3-9a78-b6a846ba244a" path="/var/lib/kubelet/pods/1c349ee9-213b-41b3-9a78-b6a846ba244a/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.883454 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3088250d-24c6-4378-9ab6-67e4244567eb" path="/var/lib/kubelet/pods/3088250d-24c6-4378-9ab6-67e4244567eb/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.884146 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54b9e833-6baa-48ca-9b62-5b288f49c020" path="/var/lib/kubelet/pods/54b9e833-6baa-48ca-9b62-5b288f49c020/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.885828 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b792e18-e8f2-4f95-ad78-a9657e30d651" path="/var/lib/kubelet/pods/6b792e18-e8f2-4f95-ad78-a9657e30d651/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.888072 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f194cf7-2f65-4dde-842a-7b93fb8148b9" path="/var/lib/kubelet/pods/7f194cf7-2f65-4dde-842a-7b93fb8148b9/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.892272 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a24664a4-3143-4fb4-b050-30b81e52a1f3" path="/var/lib/kubelet/pods/a24664a4-3143-4fb4-b050-30b81e52a1f3/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.893035 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9823f75-8df0-467c-af91-ad863667138b" path="/var/lib/kubelet/pods/b9823f75-8df0-467c-af91-ad863667138b/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.893809 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5493d26-8edb-4f78-8e5b-bd65d9490900" path="/var/lib/kubelet/pods/f5493d26-8edb-4f78-8e5b-bd65d9490900/volumes" Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903121 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-ml6x7"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903164 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-32ab-account-create-jpj8s"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903180 4884 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-32ab-account-create-jpj8s"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903203 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-npbhj"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903214 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-npbhj"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903226 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-39ed-account-create-szh2k"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903237 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-39ed-account-create-szh2k"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.903258 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-tghnk"] Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.942467 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-0078-account-create-z2ttf"] Nov 28 15:45:14 crc kubenswrapper[4884]: E1128 15:45:14.950764 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 15:45:14 crc kubenswrapper[4884]: E1128 15:45:14.950838 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data podName:3a5d81bd-3b99-4aa6-82dc-2969295dce39 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:16.950821976 +0000 UTC m=+1556.513605777 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data") pod "rabbitmq-server-0" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39") : configmap "rabbitmq-config-data" not found Nov 28 15:45:14 crc kubenswrapper[4884]: I1128 15:45:14.966848 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-0078-account-create-z2ttf"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.010824 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-tghnk"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.044146 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder0078-account-delete-nnnfh"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.057914 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-4qkr7"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.067451 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican783f-account-delete-x9hl4"] Nov 28 15:45:15 crc kubenswrapper[4884]: E1128 15:45:15.069874 4884 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 15:45:15 crc kubenswrapper[4884]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 15:45:15 crc kubenswrapper[4884]: + source /usr/local/bin/container-scripts/functions Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNBridge=br-int Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNRemote=tcp:localhost:6642 Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNEncapType=geneve Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNAvailabilityZones= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ EnableChassisAsGateway=true Nov 28 15:45:15 crc kubenswrapper[4884]: ++ PhysicalNetworks= Nov 28 
15:45:15 crc kubenswrapper[4884]: ++ OVNHostName= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 15:45:15 crc kubenswrapper[4884]: ++ ovs_dir=/var/lib/openvswitch Nov 28 15:45:15 crc kubenswrapper[4884]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 15:45:15 crc kubenswrapper[4884]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 15:45:15 crc kubenswrapper[4884]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + sleep 0.5 Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + sleep 0.5 Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + cleanup_ovsdb_server_semaphore Nov 28 15:45:15 crc kubenswrapper[4884]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 15:45:15 crc kubenswrapper[4884]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 15:45:15 crc kubenswrapper[4884]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-vm8q9" message=< Nov 28 15:45:15 crc kubenswrapper[4884]: Exiting ovsdb-server (5) ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 15:45:15 crc kubenswrapper[4884]: + source /usr/local/bin/container-scripts/functions Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNBridge=br-int Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNRemote=tcp:localhost:6642 Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNEncapType=geneve Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNAvailabilityZones= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ EnableChassisAsGateway=true Nov 28 15:45:15 crc kubenswrapper[4884]: ++ PhysicalNetworks= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNHostName= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 15:45:15 crc kubenswrapper[4884]: ++ ovs_dir=/var/lib/openvswitch Nov 28 15:45:15 crc kubenswrapper[4884]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 15:45:15 crc kubenswrapper[4884]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 15:45:15 crc kubenswrapper[4884]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + sleep 0.5 Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + sleep 0.5 Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + cleanup_ovsdb_server_semaphore Nov 28 15:45:15 crc kubenswrapper[4884]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 15:45:15 crc kubenswrapper[4884]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 15:45:15 crc kubenswrapper[4884]: > Nov 28 15:45:15 crc kubenswrapper[4884]: E1128 15:45:15.069978 4884 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 15:45:15 crc kubenswrapper[4884]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 15:45:15 crc kubenswrapper[4884]: + source /usr/local/bin/container-scripts/functions Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNBridge=br-int Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNRemote=tcp:localhost:6642 Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNEncapType=geneve Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNAvailabilityZones= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ EnableChassisAsGateway=true Nov 28 15:45:15 crc kubenswrapper[4884]: ++ PhysicalNetworks= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ OVNHostName= Nov 28 15:45:15 crc kubenswrapper[4884]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 15:45:15 crc kubenswrapper[4884]: ++ ovs_dir=/var/lib/openvswitch Nov 28 15:45:15 crc kubenswrapper[4884]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 15:45:15 crc kubenswrapper[4884]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 15:45:15 crc kubenswrapper[4884]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + sleep 0.5 Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + sleep 0.5 Nov 28 15:45:15 crc kubenswrapper[4884]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 15:45:15 crc kubenswrapper[4884]: + cleanup_ovsdb_server_semaphore Nov 28 15:45:15 crc kubenswrapper[4884]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 15:45:15 crc kubenswrapper[4884]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 15:45:15 crc kubenswrapper[4884]: > pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" containerID="cri-o://88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.070055 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" containerID="cri-o://88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" gracePeriod=29 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.077472 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-654d667c99-rmxwg"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.077693 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-654d667c99-rmxwg" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker-log" containerID="cri-o://0ac907a0c0af6ead7b7e99a49821773ab7d22cc68e8accca85a5d26f4aca83ec" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.078084 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-654d667c99-rmxwg" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker" containerID="cri-o://02309734d390a1a60061c2de6a1ad950eea1ac1ad9073bbfc9869f4baf64d813" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.133464 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-783f-account-create-6222f"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.167352 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-4qkr7"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.183162 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-783f-account-create-6222f"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.190864 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-75d9f978b8-b8bls"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.191187 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener-log" containerID="cri-o://55d3b2d4ca658ad8c1510b29e4414ddf3fb3b590d3f93d47c782a243e50ec135" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.191313 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener" containerID="cri-o://b5f34154cfef87b816d78795660796edfca1950b74e2cd3c82c08584957201a9" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.209630 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.222751 4884 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/barbican-api-69c66bbb4b-wzn9n"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.222996 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-69c66bbb4b-wzn9n" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api-log" containerID="cri-o://a13fa47dc18a9e97c12845d9bf327b2a99237952371e8e2be117fbd3fd157b73" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.223496 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-69c66bbb4b-wzn9n" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api" containerID="cri-o://6c624f23934a2757b9d0fda39e8ae22b7e58a88676450e0d3ad4c24f2ee510f7" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.233143 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.233392 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-log" containerID="cri-o://fcaf2c66276780f82ef88c966fdd309cd66bab9591912e6c0898a544d1dd5113" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.233527 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-api" containerID="cri-o://5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.258381 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.284770 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.285055 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-log" containerID="cri-o://0dc318d6697c8c4b07f99c224c62ddcb0dac46ebed54243f1880cec3c2d5fd1c" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.285498 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-metadata" containerID="cri-o://8807da65f797b04894447dbc2417ad0e7fe1821650be3b9a2879db50840e4859" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.306595 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2f46-account-create-jv9gr"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.327099 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-6kmhj"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.327314 4884 scope.go:117] "RemoveContainer" containerID="c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.339469 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.342589 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2f46-account-create-jv9gr"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.352647 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-c1f2-account-create-vf7lg"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.361493 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-6kmhj"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.362456 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.366854 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd" containerID="cri-o://76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" gracePeriod=29 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.371565 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4dba6a6f-821c-4897-b88d-5cca9482f4fa/ovsdbserver-sb/0.log" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.376149 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.376253 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-dhdfs"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.399811 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sndnk_69cf456a-4993-4bd5-b745-5d73a65b6b91/openstack-network-exporter/0.log" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.399883 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.405404 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-c1f2-account-create-vf7lg"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.408877 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.427387 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-dhdfs"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.437791 4884 scope.go:117] "RemoveContainer" containerID="4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.440595 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: E1128 15:45:15.440718 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e\": container with ID starting with 4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e not found: ID does not exist" containerID="4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.440746 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e"} err="failed to get container status \"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e\": rpc error: code = NotFound desc = could not find container \"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e\": container with ID starting with 4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e not found: ID does not exist" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.440768 4884 scope.go:117] "RemoveContainer" containerID="c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c" Nov 28 15:45:15 crc kubenswrapper[4884]: E1128 15:45:15.444047 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c\": container with ID starting with c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c not found: ID does not exist" containerID="c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.444081 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c"} err="failed to get container status \"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c\": rpc error: code = NotFound desc = could not find container \"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c\": container with ID starting with c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c not found: ID does not exist" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.444116 4884 scope.go:117] "RemoveContainer" containerID="4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.444597 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e"} err="failed to get container status \"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e\": rpc error: code = NotFound desc = could not find container \"4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e\": container with ID starting with 
4762e6f292376d10f23a175cf52fae39aca3e3e3c0c5a47e0fce3b0fa8c7378e not found: ID does not exist" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.444619 4884 scope.go:117] "RemoveContainer" containerID="c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.447421 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c"} err="failed to get container status \"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c\": rpc error: code = NotFound desc = could not find container \"c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c\": container with ID starting with c5ec28b364139c9541284077e274031377597cedbc991d0f4b3eeba82c5e0e8c not found: ID does not exist" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.447449 4884 scope.go:117] "RemoveContainer" containerID="3b941bb6a3a7e306dfa8d1b96625c36d59ceacd6279193eca68547bae5466ce0" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.447601 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell1c1f2-account-delete-lnkf6"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.453919 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.454425 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="08c698b7-a3f8-4639-8237-a8e005ae2669" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://763ed99f997eeeb0d08695603ce8409930b1a454f8319f482b75714ea0827268" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.461327 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementeaff-account-delete-tgs7q"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466065 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-nb\") pod \"006d76b7-d405-4056-a55b-f01661cde456\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466188 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-config\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466236 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-sb\") pod \"006d76b7-d405-4056-a55b-f01661cde456\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466257 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-combined-ca-bundle\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466277 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhhv5\" (UniqueName: 
\"kubernetes.io/projected/4dba6a6f-821c-4897-b88d-5cca9482f4fa-kube-api-access-mhhv5\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466304 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-svc\") pod \"006d76b7-d405-4056-a55b-f01661cde456\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466415 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466455 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-config\") pod \"006d76b7-d405-4056-a55b-f01661cde456\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466494 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-swift-storage-0\") pod \"006d76b7-d405-4056-a55b-f01661cde456\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466863 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdbserver-sb-tls-certs\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466891 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-scripts\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.466971 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdb-rundir\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.467637 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnspd\" (UniqueName: \"kubernetes.io/projected/006d76b7-d405-4056-a55b-f01661cde456-kube-api-access-pnspd\") pod \"006d76b7-d405-4056-a55b-f01661cde456\" (UID: \"006d76b7-d405-4056-a55b-f01661cde456\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.467675 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-metrics-certs-tls-certs\") pod \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\" (UID: \"4dba6a6f-821c-4897-b88d-5cca9482f4fa\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.471237 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.471684 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-config" (OuterVolumeSpecName: "config") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.471751 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-scripts" (OuterVolumeSpecName: "scripts") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.471792 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.471982 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" containerName="nova-scheduler-scheduler" containerID="cri-o://b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.479811 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.480326 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dba6a6f-821c-4897-b88d-5cca9482f4fa-kube-api-access-mhhv5" (OuterVolumeSpecName: "kube-api-access-mhhv5") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "kube-api-access-mhhv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.491496 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/006d76b7-d405-4056-a55b-f01661cde456-kube-api-access-pnspd" (OuterVolumeSpecName: "kube-api-access-pnspd") pod "006d76b7-d405-4056-a55b-f01661cde456" (UID: "006d76b7-d405-4056-a55b-f01661cde456"). InnerVolumeSpecName "kube-api-access-pnspd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.491571 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zssbq"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.506134 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-zssbq"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.511373 4884 scope.go:117] "RemoveContainer" containerID="ae49153625b5a710cac5ba1fd2d4e6f0fc8441454ec160836b76057c9cc2c22e" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.513254 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.513461 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="c1e2007d-c536-47f5-9d03-92069c96f654" containerName="nova-cell1-conductor-conductor" containerID="cri-o://831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.526839 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="rabbitmq" containerID="cri-o://343440ad4aebfa33b7450af592a1090831143203f96732b5193c19160e303a6f" gracePeriod=604800 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.540181 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.540739 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" containerName="nova-cell0-conductor-conductor" containerID="cri-o://eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.550237 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wgxnl"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.555263 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="rabbitmq" containerID="cri-o://5cea516e9ac81f0a76972ae4e518242f21039bb650a8fa7f56df976c1aec2cbc" gracePeriod=604800 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571499 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44rdp\" (UniqueName: \"kubernetes.io/projected/69cf456a-4993-4bd5-b745-5d73a65b6b91-kube-api-access-44rdp\") pod \"69cf456a-4993-4bd5-b745-5d73a65b6b91\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571617 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config-secret\") pod \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571663 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-combined-ca-bundle\") pod \"69cf456a-4993-4bd5-b745-5d73a65b6b91\" 
(UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571692 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-metrics-certs-tls-certs\") pod \"69cf456a-4993-4bd5-b745-5d73a65b6b91\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571715 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69cf456a-4993-4bd5-b745-5d73a65b6b91-config\") pod \"69cf456a-4993-4bd5-b745-5d73a65b6b91\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571739 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tll75\" (UniqueName: \"kubernetes.io/projected/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-kube-api-access-tll75\") pod \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571758 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config\") pod \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571778 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovs-rundir\") pod \"69cf456a-4993-4bd5-b745-5d73a65b6b91\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571847 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovn-rundir\") pod \"69cf456a-4993-4bd5-b745-5d73a65b6b91\" (UID: \"69cf456a-4993-4bd5-b745-5d73a65b6b91\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.571910 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-combined-ca-bundle\") pod \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\" (UID: \"e4aa2db6-3fe4-43e0-8603-86dbd3a238e3\") " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.572322 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.572335 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhhv5\" (UniqueName: \"kubernetes.io/projected/4dba6a6f-821c-4897-b88d-5cca9482f4fa-kube-api-access-mhhv5\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.572356 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.572366 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dba6a6f-821c-4897-b88d-5cca9482f4fa-scripts\") on node 
\"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.572376 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.572384 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnspd\" (UniqueName: \"kubernetes.io/projected/006d76b7-d405-4056-a55b-f01661cde456-kube-api-access-pnspd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.573665 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "69cf456a-4993-4bd5-b745-5d73a65b6b91" (UID: "69cf456a-4993-4bd5-b745-5d73a65b6b91"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.573709 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "69cf456a-4993-4bd5-b745-5d73a65b6b91" (UID: "69cf456a-4993-4bd5-b745-5d73a65b6b91"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.583049 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69cf456a-4993-4bd5-b745-5d73a65b6b91-config" (OuterVolumeSpecName: "config") pod "69cf456a-4993-4bd5-b745-5d73a65b6b91" (UID: "69cf456a-4993-4bd5-b745-5d73a65b6b91"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.589987 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-kube-api-access-tll75" (OuterVolumeSpecName: "kube-api-access-tll75") pod "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" (UID: "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3"). InnerVolumeSpecName "kube-api-access-tll75". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.591487 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-wgxnl"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.599286 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69cf456a-4993-4bd5-b745-5d73a65b6b91-kube-api-access-44rdp" (OuterVolumeSpecName: "kube-api-access-44rdp") pod "69cf456a-4993-4bd5-b745-5d73a65b6b91" (UID: "69cf456a-4993-4bd5-b745-5d73a65b6b91"). InnerVolumeSpecName "kube-api-access-44rdp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.599490 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.604431 4884 generic.go:334] "Generic (PLEG): container finished" podID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerID="55d3b2d4ca658ad8c1510b29e4414ddf3fb3b590d3f93d47c782a243e50ec135" exitCode=143 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.604502 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" event={"ID":"c29b378e-a008-4903-9ebd-2570d37d8a11","Type":"ContainerDied","Data":"55d3b2d4ca658ad8c1510b29e4414ddf3fb3b590d3f93d47c782a243e50ec135"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.608789 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.610921 4884 generic.go:334] "Generic (PLEG): container finished" podID="49c9a390-4563-4e1e-a109-ff673e664409" containerID="a13fa47dc18a9e97c12845d9bf327b2a99237952371e8e2be117fbd3fd157b73" exitCode=143 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.610994 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69c66bbb4b-wzn9n" event={"ID":"49c9a390-4563-4e1e-a109-ff673e664409","Type":"ContainerDied","Data":"a13fa47dc18a9e97c12845d9bf327b2a99237952371e8e2be117fbd3fd157b73"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.614935 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" event={"ID":"006d76b7-d405-4056-a55b-f01661cde456","Type":"ContainerDied","Data":"85d5d24d88e32819495a90b2605529d70054f09d61bf3ea16d4a41a2d6121778"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.615044 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-4mp84" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.621763 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.622798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementeaff-account-delete-tgs7q" event={"ID":"7bc5134b-7bda-47e0-86a3-b4f374e842e6","Type":"ContainerStarted","Data":"dd3c3fcdf3aec789a9e7b6153b7bf3f8eaa0ccc30670ab97373412c67d5f4cd7"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.622841 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementeaff-account-delete-tgs7q" event={"ID":"7bc5134b-7bda-47e0-86a3-b4f374e842e6","Type":"ContainerStarted","Data":"694adf5215001a887e1e20f6c763ae4955faae8e55d03a425298d8a9d337182b"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.622962 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placementeaff-account-delete-tgs7q" podUID="7bc5134b-7bda-47e0-86a3-b4f374e842e6" containerName="mariadb-account-delete" containerID="cri-o://dd3c3fcdf3aec789a9e7b6153b7bf3f8eaa0ccc30670ab97373412c67d5f4cd7" gracePeriod=30 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.642079 4884 generic.go:334] "Generic (PLEG): container finished" podID="e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" containerID="c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6" exitCode=137 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.642300 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.664006 4884 scope.go:117] "RemoveContainer" containerID="8bd58f8dfb10dddf860b7726450d9b6180488f6c7bec696c42d57ea2a88b8b6e" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.668270 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-config" (OuterVolumeSpecName: "config") pod "006d76b7-d405-4056-a55b-f01661cde456" (UID: "006d76b7-d405-4056-a55b-f01661cde456"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.670635 4884 generic.go:334] "Generic (PLEG): container finished" podID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerID="6058d7f72df016355acac3c48d8e08bd8294db35efcb91a326adb1708153048c" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.670716 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dd7bd66f-2ff2l" event={"ID":"458e9b5e-8446-4bfa-ba33-12a3a32c74ea","Type":"ContainerDied","Data":"6058d7f72df016355acac3c48d8e08bd8294db35efcb91a326adb1708153048c"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674295 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69cf456a-4993-4bd5-b745-5d73a65b6b91-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674326 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tll75\" (UniqueName: \"kubernetes.io/projected/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-kube-api-access-tll75\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674334 4884 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674344 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674352 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/69cf456a-4993-4bd5-b745-5d73a65b6b91-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674360 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44rdp\" (UniqueName: \"kubernetes.io/projected/69cf456a-4993-4bd5-b745-5d73a65b6b91-kube-api-access-44rdp\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.674368 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.759300 4884 scope.go:117] "RemoveContainer" containerID="1848c2fd98ec9c6cc4e03c50a5728558a64d191560cff7ff8ba31f079d3152a9" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.760653 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "006d76b7-d405-4056-a55b-f01661cde456" (UID: "006d76b7-d405-4056-a55b-f01661cde456"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.763292 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" (UID: "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.775187 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.780071 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.780117 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.780132 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.781707 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "006d76b7-d405-4056-a55b-f01661cde456" (UID: "006d76b7-d405-4056-a55b-f01661cde456"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.789483 4884 generic.go:334] "Generic (PLEG): container finished" podID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerID="0ac907a0c0af6ead7b7e99a49821773ab7d22cc68e8accca85a5d26f4aca83ec" exitCode=143 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.789620 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654d667c99-rmxwg" event={"ID":"afabf28f-eb82-4439-aa4f-3154e1007bf5","Type":"ContainerDied","Data":"0ac907a0c0af6ead7b7e99a49821773ab7d22cc68e8accca85a5d26f4aca83ec"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.792779 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4dba6a6f-821c-4897-b88d-5cca9482f4fa/ovsdbserver-sb/0.log" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.793003 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dba6a6f-821c-4897-b88d-5cca9482f4fa","Type":"ContainerDied","Data":"63e656df8f1d12ce740664a60092cbcac3a306b5dd1672c6f364faf74c96c8d0"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.793359 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.804727 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-sndnk_69cf456a-4993-4bd5-b745-5d73a65b6b91/openstack-network-exporter/0.log" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.804984 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-sndnk" event={"ID":"69cf456a-4993-4bd5-b745-5d73a65b6b91","Type":"ContainerDied","Data":"4b618815a97880b423cd9473468febd7a7c6d7ecfc67c7ddbe2a95872853999f"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.805239 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-sndnk" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.839515 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placementeaff-account-delete-tgs7q" podStartSLOduration=3.839494135 podStartE2EDuration="3.839494135s" podCreationTimestamp="2025-11-28 15:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:45:15.667364777 +0000 UTC m=+1555.230148578" watchObservedRunningTime="2025-11-28 15:45:15.839494135 +0000 UTC m=+1555.402277926" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.852437 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder0078-account-delete-nnnfh"] Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.882766 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911115 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911146 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911155 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911171 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911177 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911184 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911190 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911196 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911203 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911209 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" 
containerID="88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911220 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911226 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911233 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911240 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09" exitCode=0 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911309 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911346 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911366 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911375 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911383 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911392 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 
15:45:15.911414 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911424 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911432 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911440 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911449 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.911462 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.942650 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "006d76b7-d405-4056-a55b-f01661cde456" (UID: "006d76b7-d405-4056-a55b-f01661cde456"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.970486 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0e2713c-5c3d-457f-b444-122039f003d3" containerID="deec2653ca05ea2b2431e7f8f5e25dfcc18dcdf5ee831af2554036dbfce9676e" exitCode=143 Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.970546 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c0e2713c-5c3d-457f-b444-122039f003d3","Type":"ContainerDied","Data":"deec2653ca05ea2b2431e7f8f5e25dfcc18dcdf5ee831af2554036dbfce9676e"} Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.976158 4884 scope.go:117] "RemoveContainer" containerID="c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6" Nov 28 15:45:15 crc kubenswrapper[4884]: I1128 15:45:15.985670 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.007026 4884 generic.go:334] "Generic (PLEG): container finished" podID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerID="fcaf2c66276780f82ef88c966fdd309cd66bab9591912e6c0898a544d1dd5113" exitCode=143 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.007182 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cb0b621b-d100-4ec9-b815-13e67489a2ac","Type":"ContainerDied","Data":"fcaf2c66276780f82ef88c966fdd309cd66bab9591912e6c0898a544d1dd5113"} Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.022554 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" (UID: "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.031653 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69cf456a-4993-4bd5-b745-5d73a65b6b91" (UID: "69cf456a-4993-4bd5-b745-5d73a65b6b91"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.053253 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican783f-account-delete-x9hl4"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.055440 4884 generic.go:334] "Generic (PLEG): container finished" podID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" exitCode=0 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.055600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerDied","Data":"88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4"} Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.069405 4884 generic.go:334] "Generic (PLEG): container finished" podID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerID="3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd" exitCode=0 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.069482 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"769485f5-63dc-4d17-9bfb-3006d99e2616","Type":"ContainerDied","Data":"3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd"} Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.077446 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "006d76b7-d405-4056-a55b-f01661cde456" (UID: "006d76b7-d405-4056-a55b-f01661cde456"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.086473 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell1c1f2-account-delete-lnkf6"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.089833 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.089874 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/006d76b7-d405-4056-a55b-f01661cde456-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.089883 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.091265 4884 generic.go:334] "Generic (PLEG): container finished" podID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerID="ce2d368f9ec98e7e160e340cbf52ff57688a8c528c07d144e558a64b3ca00a72" exitCode=143 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.095570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5d1f5739-e8a4-4081-8104-57dfc250861a","Type":"ContainerDied","Data":"ce2d368f9ec98e7e160e340cbf52ff57688a8c528c07d144e558a64b3ca00a72"} Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.099906 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" (UID: "e4aa2db6-3fe4-43e0-8603-86dbd3a238e3"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.100491 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.106807 4884 generic.go:334] "Generic (PLEG): container finished" podID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerID="0dc318d6697c8c4b07f99c224c62ddcb0dac46ebed54243f1880cec3c2d5fd1c" exitCode=143 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.106856 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"39e34f52-669e-4086-94ef-a38542dbc6ea","Type":"ContainerDied","Data":"0dc318d6697c8c4b07f99c224c62ddcb0dac46ebed54243f1880cec3c2d5fd1c"} Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.119314 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerName="galera" containerID="cri-o://73bf92758339020175aea8e11f436696e330b710e4144aaee2fa67b45a5f9276" gracePeriod=30 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.141873 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "69cf456a-4993-4bd5-b745-5d73a65b6b91" (UID: "69cf456a-4993-4bd5-b745-5d73a65b6b91"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.162171 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0992b-account-delete-sfgzb"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.191506 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.192665 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.192697 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69cf456a-4993-4bd5-b745-5d73a65b6b91-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.208865 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "4dba6a6f-821c-4897-b88d-5cca9482f4fa" (UID: "4dba6a6f-821c-4897-b88d-5cca9482f4fa"). 
InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.295328 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dba6a6f-821c-4897-b88d-5cca9482f4fa-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:16 crc kubenswrapper[4884]: E1128 15:45:16.295418 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:16 crc kubenswrapper[4884]: E1128 15:45:16.295467 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data podName:454fa1ac-19ca-4c44-b0fb-2c30039524a7 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:20.29545116 +0000 UTC m=+1559.858234961 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data") pod "rabbitmq-cell1-server-0" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7") : configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:16 crc kubenswrapper[4884]: W1128 15:45:16.300516 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2e3f8ba_4fe2_433c_85d4_b30c488af3cd.slice/crio-8bbfb53aa56ee7bb23ef5c84e91ae49fe842f4d1ab68015ae4439c9cf6b8160c WatchSource:0}: Error finding container 8bbfb53aa56ee7bb23ef5c84e91ae49fe842f4d1ab68015ae4439c9cf6b8160c: Status 404 returned error can't find the container with id 8bbfb53aa56ee7bb23ef5c84e91ae49fe842f4d1ab68015ae4439c9cf6b8160c Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.301778 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5j947"] Nov 28 15:45:16 crc kubenswrapper[4884]: W1128 15:45:16.370403 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebf7deee_37f0_4b47_8e10_3027e7009916.slice/crio-8c0e43646c29184c6b847236372ebf4db7aaa9a8bf7767ab6f834a471f9bba70 WatchSource:0}: Error finding container 8c0e43646c29184c6b847236372ebf4db7aaa9a8bf7767ab6f834a471f9bba70: Status 404 returned error can't find the container with id 8c0e43646c29184c6b847236372ebf4db7aaa9a8bf7767ab6f834a471f9bba70 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.403188 4884 scope.go:117] "RemoveContainer" containerID="c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6" Nov 28 15:45:16 crc kubenswrapper[4884]: E1128 15:45:16.407209 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6\": container with ID starting with c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6 not found: ID does not exist" containerID="c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.407264 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6"} err="failed to get container status \"c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6\": rpc error: code = NotFound desc = could not find container 
\"c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6\": container with ID starting with c74023adf576f1e4172a0537269b445e073373f69f8c7c20d5d415ff920ef4b6 not found: ID does not exist" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.407304 4884 scope.go:117] "RemoveContainer" containerID="a5c169886253947fb2ce50aaadfd10698791496c5feb2c5fc8da5851f6053e65" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.416883 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-4mp84"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.427556 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-4mp84"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.451690 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.469768 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.524501 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-sndnk"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.530398 4884 scope.go:117] "RemoveContainer" containerID="2f6f898b6b1123fb5294da0cc31225fadef570801105ece1eb5ae7e08bdbfeeb" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.531763 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-sndnk"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.561263 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-988fdb959-xkp66"] Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.561995 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-988fdb959-xkp66" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-server" containerID="cri-o://ae366ef4151eca779ecbf163f7ffdccba3b64e846702b94d2bd3434c05e40d34" gracePeriod=30 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.561700 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-988fdb959-xkp66" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-httpd" containerID="cri-o://c9fbb45580ab524d334d02b6207e2923abff684e828f592b260b02070af2c617" gracePeriod=30 Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.580123 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-cell1-novncproxy-0" podUID="08c698b7-a3f8-4639-8237-a8e005ae2669" containerName="nova-cell1-novncproxy-novncproxy" probeResult="failure" output="Get \"https://10.217.0.195:6080/vnc_lite.html\": dial tcp 10.217.0.195:6080: connect: connection refused" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.599986 4884 scope.go:117] "RemoveContainer" containerID="59f590909455eb43b2d839c335076a9aae027b6d3bbbbda17ffef2e52abe1e46" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.704141 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="006d76b7-d405-4056-a55b-f01661cde456" path="/var/lib/kubelet/pods/006d76b7-d405-4056-a55b-f01661cde456/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.704735 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20e1b396-c515-4176-9402-72f570001c08" path="/var/lib/kubelet/pods/20e1b396-c515-4176-9402-72f570001c08/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.705463 4884 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33d7bf6d-b03e-493b-9e3f-6dcb2b223681" path="/var/lib/kubelet/pods/33d7bf6d-b03e-493b-9e3f-6dcb2b223681/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.706068 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4326f7b3-d838-49f3-b54f-574a55b44de4" path="/var/lib/kubelet/pods/4326f7b3-d838-49f3-b54f-574a55b44de4/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.707222 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47ca75c0-343b-4cef-82d6-24c20f014435" path="/var/lib/kubelet/pods/47ca75c0-343b-4cef-82d6-24c20f014435/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.707821 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a72f0d8-d60b-4f12-97ed-9ddaca128ff8" path="/var/lib/kubelet/pods/4a72f0d8-d60b-4f12-97ed-9ddaca128ff8/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.708752 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b9d1dde-9e0b-47bb-8046-a5687c344d9b" path="/var/lib/kubelet/pods/4b9d1dde-9e0b-47bb-8046-a5687c344d9b/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.711148 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" path="/var/lib/kubelet/pods/4dba6a6f-821c-4897-b88d-5cca9482f4fa/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.712229 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69cf456a-4993-4bd5-b745-5d73a65b6b91" path="/var/lib/kubelet/pods/69cf456a-4993-4bd5-b745-5d73a65b6b91/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.713913 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cdac902-b0e5-4f41-923c-07241207d730" path="/var/lib/kubelet/pods/6cdac902-b0e5-4f41-923c-07241207d730/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.714759 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a17a6e8-da33-4343-bc08-feb493224228" path="/var/lib/kubelet/pods/8a17a6e8-da33-4343-bc08-feb493224228/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.716226 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac05886e-6151-4d56-92dd-093c27c7c955" path="/var/lib/kubelet/pods/ac05886e-6151-4d56-92dd-093c27c7c955/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.717055 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad44ad3b-6966-47ed-ac5d-fb310b053dbc" path="/var/lib/kubelet/pods/ad44ad3b-6966-47ed-ac5d-fb310b053dbc/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.719118 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bba819eb-5c1d-410b-9a6e-59c00da11771" path="/var/lib/kubelet/pods/bba819eb-5c1d-410b-9a6e-59c00da11771/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.719680 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc0daf5c-fae9-4225-95bf-2fb62f2da934" path="/var/lib/kubelet/pods/cc0daf5c-fae9-4225-95bf-2fb62f2da934/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.720399 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4aa2db6-3fe4-43e0-8603-86dbd3a238e3" path="/var/lib/kubelet/pods/e4aa2db6-3fe4-43e0-8603-86dbd3a238e3/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.722896 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="e9a69be8-8e9e-45b3-ab4d-b8f9a148e878" path="/var/lib/kubelet/pods/e9a69be8-8e9e-45b3-ab4d-b8f9a148e878/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.723536 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f45dbb12-57af-4477-8456-37834db54a0c" path="/var/lib/kubelet/pods/f45dbb12-57af-4477-8456-37834db54a0c/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.724223 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7a1b531-4dbd-481a-a256-47393a6f53c5" path="/var/lib/kubelet/pods/f7a1b531-4dbd-481a-a256-47393a6f53c5/volumes" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.763866 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.915365 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbw97\" (UniqueName: \"kubernetes.io/projected/769485f5-63dc-4d17-9bfb-3006d99e2616-kube-api-access-cbw97\") pod \"769485f5-63dc-4d17-9bfb-3006d99e2616\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.915411 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data\") pod \"769485f5-63dc-4d17-9bfb-3006d99e2616\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.915459 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-scripts\") pod \"769485f5-63dc-4d17-9bfb-3006d99e2616\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.915483 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle\") pod \"769485f5-63dc-4d17-9bfb-3006d99e2616\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.915610 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/769485f5-63dc-4d17-9bfb-3006d99e2616-etc-machine-id\") pod \"769485f5-63dc-4d17-9bfb-3006d99e2616\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.915646 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data-custom\") pod \"769485f5-63dc-4d17-9bfb-3006d99e2616\" (UID: \"769485f5-63dc-4d17-9bfb-3006d99e2616\") " Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.919885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/769485f5-63dc-4d17-9bfb-3006d99e2616-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "769485f5-63dc-4d17-9bfb-3006d99e2616" (UID: "769485f5-63dc-4d17-9bfb-3006d99e2616"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.926290 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-scripts" (OuterVolumeSpecName: "scripts") pod "769485f5-63dc-4d17-9bfb-3006d99e2616" (UID: "769485f5-63dc-4d17-9bfb-3006d99e2616"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.926365 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/769485f5-63dc-4d17-9bfb-3006d99e2616-kube-api-access-cbw97" (OuterVolumeSpecName: "kube-api-access-cbw97") pod "769485f5-63dc-4d17-9bfb-3006d99e2616" (UID: "769485f5-63dc-4d17-9bfb-3006d99e2616"). InnerVolumeSpecName "kube-api-access-cbw97". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:16 crc kubenswrapper[4884]: I1128 15:45:16.927326 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "769485f5-63dc-4d17-9bfb-3006d99e2616" (UID: "769485f5-63dc-4d17-9bfb-3006d99e2616"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.017879 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.017953 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data podName:3a5d81bd-3b99-4aa6-82dc-2969295dce39 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:21.017934106 +0000 UTC m=+1560.580717907 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data") pod "rabbitmq-server-0" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39") : configmap "rabbitmq-config-data" not found Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.018414 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/769485f5-63dc-4d17-9bfb-3006d99e2616-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.018459 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.018472 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbw97\" (UniqueName: \"kubernetes.io/projected/769485f5-63dc-4d17-9bfb-3006d99e2616-kube-api-access-cbw97\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.018487 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.032448 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.035380 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.039296 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.039334 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" containerName="nova-scheduler-scheduler" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.062753 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data" (OuterVolumeSpecName: "config-data") pod "769485f5-63dc-4d17-9bfb-3006d99e2616" (UID: "769485f5-63dc-4d17-9bfb-3006d99e2616"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.086059 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "769485f5-63dc-4d17-9bfb-3006d99e2616" (UID: "769485f5-63dc-4d17-9bfb-3006d99e2616"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.120372 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.120394 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/769485f5-63dc-4d17-9bfb-3006d99e2616-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.140759 4884 generic.go:334] "Generic (PLEG): container finished" podID="38ca29b2-4263-4cb5-ba00-fe95430cf7f6" containerID="44cd99c65316901d33074d95c92f5d515715e45a10031091ae4584eaeeb68f07" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.140813 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican783f-account-delete-x9hl4" event={"ID":"38ca29b2-4263-4cb5-ba00-fe95430cf7f6","Type":"ContainerDied","Data":"44cd99c65316901d33074d95c92f5d515715e45a10031091ae4584eaeeb68f07"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.140835 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican783f-account-delete-x9hl4" event={"ID":"38ca29b2-4263-4cb5-ba00-fe95430cf7f6","Type":"ContainerStarted","Data":"972be72ce9ccb798c52fc2ba5c705c2f155ef97dbb9da33bb02f30991d74dd47"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.145614 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell1c1f2-account-delete-lnkf6" event={"ID":"90481f2a-55d7-459f-9e46-2ca816951a8d","Type":"ContainerDied","Data":"5a95d190e562709b097e62fe0f40779fc9e24384a8d1895edcab6d08bbb14472"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.146571 4884 generic.go:334] "Generic (PLEG): container finished" podID="90481f2a-55d7-459f-9e46-2ca816951a8d" containerID="5a95d190e562709b097e62fe0f40779fc9e24384a8d1895edcab6d08bbb14472" exitCode=1 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.146690 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell1c1f2-account-delete-lnkf6" event={"ID":"90481f2a-55d7-459f-9e46-2ca816951a8d","Type":"ContainerStarted","Data":"76eb634b791382d403eee6f78ff2b9f31981550fecad35f6f1215b7e5bb4bdb2"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.151074 4884 generic.go:334] "Generic (PLEG): container finished" podID="08c698b7-a3f8-4639-8237-a8e005ae2669" containerID="763ed99f997eeeb0d08695603ce8409930b1a454f8319f482b75714ea0827268" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.151139 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"08c698b7-a3f8-4639-8237-a8e005ae2669","Type":"ContainerDied","Data":"763ed99f997eeeb0d08695603ce8409930b1a454f8319f482b75714ea0827268"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.151158 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"08c698b7-a3f8-4639-8237-a8e005ae2669","Type":"ContainerDied","Data":"315407436e5b0de387bb6fd1fa2da1474c06c9ef9e43c4a9b23b873c40171369"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.151170 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="315407436e5b0de387bb6fd1fa2da1474c06c9ef9e43c4a9b23b873c40171369" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.152978 4884 generic.go:334] "Generic (PLEG): container finished" podID="f788ec00-6116-4f7f-ac08-21623599090d" containerID="ae366ef4151eca779ecbf163f7ffdccba3b64e846702b94d2bd3434c05e40d34" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.153004 4884 generic.go:334] "Generic (PLEG): container finished" podID="f788ec00-6116-4f7f-ac08-21623599090d" containerID="c9fbb45580ab524d334d02b6207e2923abff684e828f592b260b02070af2c617" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.153022 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-988fdb959-xkp66" event={"ID":"f788ec00-6116-4f7f-ac08-21623599090d","Type":"ContainerDied","Data":"ae366ef4151eca779ecbf163f7ffdccba3b64e846702b94d2bd3434c05e40d34"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.153079 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-988fdb959-xkp66" event={"ID":"f788ec00-6116-4f7f-ac08-21623599090d","Type":"ContainerDied","Data":"c9fbb45580ab524d334d02b6207e2923abff684e828f592b260b02070af2c617"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.163483 4884 generic.go:334] "Generic (PLEG): container finished" podID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerID="46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.163571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"769485f5-63dc-4d17-9bfb-3006d99e2616","Type":"ContainerDied","Data":"46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.163618 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"769485f5-63dc-4d17-9bfb-3006d99e2616","Type":"ContainerDied","Data":"728671745f46c957923baa92164b93a32b0f269345d3d8b0d48a5ec05ba50e20"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.163638 4884 scope.go:117] "RemoveContainer" containerID="3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.163910 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.191671 4884 generic.go:334] "Generic (PLEG): container finished" podID="c1e2007d-c536-47f5-9d03-92069c96f654" containerID="831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.191939 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c1e2007d-c536-47f5-9d03-92069c96f654","Type":"ContainerDied","Data":"831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.191959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c1e2007d-c536-47f5-9d03-92069c96f654","Type":"ContainerDied","Data":"1180b9da0455c776e4db49d03528dd64ad9d827c0a6a276293c6e32165d2e88e"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.191969 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1180b9da0455c776e4db49d03528dd64ad9d827c0a6a276293c6e32165d2e88e" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.194813 4884 generic.go:334] "Generic (PLEG): container finished" podID="c2e3f8ba-4fe2-433c-85d4-b30c488af3cd" containerID="cc77c44684f7d1ced03406af74ae901e41656eca80e8b630852276ad4fe96d9d" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.194883 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0992b-account-delete-sfgzb" event={"ID":"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd","Type":"ContainerDied","Data":"cc77c44684f7d1ced03406af74ae901e41656eca80e8b630852276ad4fe96d9d"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.194899 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0992b-account-delete-sfgzb" event={"ID":"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd","Type":"ContainerStarted","Data":"8bbfb53aa56ee7bb23ef5c84e91ae49fe842f4d1ab68015ae4439c9cf6b8160c"} Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.196695 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8 is running failed: container process not found" containerID="831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.197153 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8 is running failed: container process not found" containerID="831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.197396 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8 is running failed: container process not found" containerID="831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.197425 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound 
desc = container is not created or running: checking if PID of 831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="c1e2007d-c536-47f5-9d03-92069c96f654" containerName="nova-cell1-conductor-conductor" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.198379 4884 generic.go:334] "Generic (PLEG): container finished" podID="7bc5134b-7bda-47e0-86a3-b4f374e842e6" containerID="dd3c3fcdf3aec789a9e7b6153b7bf3f8eaa0ccc30670ab97373412c67d5f4cd7" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.198433 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementeaff-account-delete-tgs7q" event={"ID":"7bc5134b-7bda-47e0-86a3-b4f374e842e6","Type":"ContainerDied","Data":"dd3c3fcdf3aec789a9e7b6153b7bf3f8eaa0ccc30670ab97373412c67d5f4cd7"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.198455 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementeaff-account-delete-tgs7q" event={"ID":"7bc5134b-7bda-47e0-86a3-b4f374e842e6","Type":"ContainerDied","Data":"694adf5215001a887e1e20f6c763ae4955faae8e55d03a425298d8a9d337182b"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.198467 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="694adf5215001a887e1e20f6c763ae4955faae8e55d03a425298d8a9d337182b" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.203195 4884 generic.go:334] "Generic (PLEG): container finished" podID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerID="70ad5e352be838cd9b823be6fa572eb0d45de5ed204f4a0a7f67e0afe112af20" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.203303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7789c97d46-jmbnq" event={"ID":"6237eb73-294e-4e4b-a619-e669061a1b5b","Type":"ContainerDied","Data":"70ad5e352be838cd9b823be6fa572eb0d45de5ed204f4a0a7f67e0afe112af20"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.207374 4884 generic.go:334] "Generic (PLEG): container finished" podID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerID="532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.207415 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5j947" event={"ID":"ebf7deee-37f0-4b47-8e10-3027e7009916","Type":"ContainerDied","Data":"532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.207429 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5j947" event={"ID":"ebf7deee-37f0-4b47-8e10-3027e7009916","Type":"ContainerStarted","Data":"8c0e43646c29184c6b847236372ebf4db7aaa9a8bf7767ab6f834a471f9bba70"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.221002 4884 generic.go:334] "Generic (PLEG): container finished" podID="f633ca3e-03a6-4c24-9783-94fb61ed0ade" containerID="9cd2eb76fc50e8e3faf8777ccbf612add4a055c39bdf3a15fe9e7894fdd3cb9e" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.221182 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder0078-account-delete-nnnfh" event={"ID":"f633ca3e-03a6-4c24-9783-94fb61ed0ade","Type":"ContainerDied","Data":"9cd2eb76fc50e8e3faf8777ccbf612add4a055c39bdf3a15fe9e7894fdd3cb9e"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.221208 4884 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder0078-account-delete-nnnfh" event={"ID":"f633ca3e-03a6-4c24-9783-94fb61ed0ade","Type":"ContainerStarted","Data":"c8fbde66354abcb01f69c9dcf5b9dcab98c4ce61f831b3f338d15547a08a55fc"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.231011 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerID="73bf92758339020175aea8e11f436696e330b710e4144aaee2fa67b45a5f9276" exitCode=0 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.231080 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4c2dcae8-7c76-46e9-90d4-afd8af5f474a","Type":"ContainerDied","Data":"73bf92758339020175aea8e11f436696e330b710e4144aaee2fa67b45a5f9276"} Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.487250 4884 scope.go:117] "RemoveContainer" containerID="46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.521803 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.529507 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.535931 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.543675 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.546149 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.574394 4884 scope.go:117] "RemoveContainer" containerID="3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.574541 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.575597 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd\": container with ID starting with 3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd not found: ID does not exist" containerID="3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.575626 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd"} err="failed to get container status \"3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd\": rpc error: code = NotFound desc = could not find container \"3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd\": container with ID starting with 3f054079fe1036c7ed6c0fedab54754fe54f835413d1ac39f55021e187bdf2dd not found: ID does not exist" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.575644 4884 scope.go:117] "RemoveContainer" containerID="46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d" Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.578955 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d\": container with ID starting with 46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d not found: ID does not exist" containerID="46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.578991 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d"} err="failed to get container status \"46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d\": rpc error: code = NotFound desc = could not find container \"46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d\": container with ID starting with 46405e1aa8f3930de16eda44854463dbefe8aff5318beab7157481294ff6ea3d not found: ID does not exist" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.593523 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.594546 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.165:8776/healthcheck\": read tcp 10.217.0.2:57592->10.217.0.165:8776: read: connection reset by peer" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.633616 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flds4\" (UniqueName: \"kubernetes.io/projected/7bc5134b-7bda-47e0-86a3-b4f374e842e6-kube-api-access-flds4\") pod \"7bc5134b-7bda-47e0-86a3-b4f374e842e6\" (UID: \"7bc5134b-7bda-47e0-86a3-b4f374e842e6\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.633944 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle\") pod \"c1e2007d-c536-47f5-9d03-92069c96f654\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.633991 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-config-data\") pod \"08c698b7-a3f8-4639-8237-a8e005ae2669\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.634065 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-nova-novncproxy-tls-certs\") pod \"08c698b7-a3f8-4639-8237-a8e005ae2669\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.634105 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-combined-ca-bundle\") pod \"08c698b7-a3f8-4639-8237-a8e005ae2669\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.634176 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-config-data\") pod \"c1e2007d-c536-47f5-9d03-92069c96f654\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.634207 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzt8n\" (UniqueName: \"kubernetes.io/projected/08c698b7-a3f8-4639-8237-a8e005ae2669-kube-api-access-xzt8n\") pod \"08c698b7-a3f8-4639-8237-a8e005ae2669\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.634263 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-vencrypt-tls-certs\") pod \"08c698b7-a3f8-4639-8237-a8e005ae2669\" (UID: \"08c698b7-a3f8-4639-8237-a8e005ae2669\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.634319 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqz5v\" (UniqueName: 
\"kubernetes.io/projected/c1e2007d-c536-47f5-9d03-92069c96f654-kube-api-access-gqz5v\") pod \"c1e2007d-c536-47f5-9d03-92069c96f654\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.638818 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bc5134b-7bda-47e0-86a3-b4f374e842e6-kube-api-access-flds4" (OuterVolumeSpecName: "kube-api-access-flds4") pod "7bc5134b-7bda-47e0-86a3-b4f374e842e6" (UID: "7bc5134b-7bda-47e0-86a3-b4f374e842e6"). InnerVolumeSpecName "kube-api-access-flds4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.647334 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c is running failed: container process not found" containerID="eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.647766 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c is running failed: container process not found" containerID="eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.649276 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c is running failed: container process not found" containerID="eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 15:45:17 crc kubenswrapper[4884]: E1128 15:45:17.649315 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" containerName="nova-cell0-conductor-conductor" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.702524 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1e2007d-c536-47f5-9d03-92069c96f654-kube-api-access-gqz5v" (OuterVolumeSpecName: "kube-api-access-gqz5v") pod "c1e2007d-c536-47f5-9d03-92069c96f654" (UID: "c1e2007d-c536-47f5-9d03-92069c96f654"). InnerVolumeSpecName "kube-api-access-gqz5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.702681 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08c698b7-a3f8-4639-8237-a8e005ae2669" (UID: "08c698b7-a3f8-4639-8237-a8e005ae2669"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.704933 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08c698b7-a3f8-4639-8237-a8e005ae2669-kube-api-access-xzt8n" (OuterVolumeSpecName: "kube-api-access-xzt8n") pod "08c698b7-a3f8-4639-8237-a8e005ae2669" (UID: "08c698b7-a3f8-4639-8237-a8e005ae2669"). InnerVolumeSpecName "kube-api-access-xzt8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.728785 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-config-data" (OuterVolumeSpecName: "config-data") pod "08c698b7-a3f8-4639-8237-a8e005ae2669" (UID: "08c698b7-a3f8-4639-8237-a8e005ae2669"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.728812 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "08c698b7-a3f8-4639-8237-a8e005ae2669" (UID: "08c698b7-a3f8-4639-8237-a8e005ae2669"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.731269 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-config-data" (OuterVolumeSpecName: "config-data") pod "c1e2007d-c536-47f5-9d03-92069c96f654" (UID: "c1e2007d-c536-47f5-9d03-92069c96f654"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736181 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1e2007d-c536-47f5-9d03-92069c96f654" (UID: "c1e2007d-c536-47f5-9d03-92069c96f654"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736462 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-combined-ca-bundle\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736522 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-internal-tls-certs\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736540 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-operator-scripts\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736580 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-combined-ca-bundle\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736600 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-config-data\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736621 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-scripts\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736712 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6237eb73-294e-4e4b-a619-e669061a1b5b-logs\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736796 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-galera-tls-certs\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736830 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle\") pod \"c1e2007d-c536-47f5-9d03-92069c96f654\" (UID: \"c1e2007d-c536-47f5-9d03-92069c96f654\") " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736914 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-secrets\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") " 
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.736981 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-public-tls-certs\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737017 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737037 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-generated\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737058 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8m62\" (UniqueName: \"kubernetes.io/projected/6237eb73-294e-4e4b-a619-e669061a1b5b-kube-api-access-f8m62\") pod \"6237eb73-294e-4e4b-a619-e669061a1b5b\" (UID: \"6237eb73-294e-4e4b-a619-e669061a1b5b\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737078 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kolla-config\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737126 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtvc9\" (UniqueName: \"kubernetes.io/projected/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kube-api-access-jtvc9\") pod \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\" (UID: \"4c2dcae8-7c76-46e9-90d4-afd8af5f474a\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737475 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzt8n\" (UniqueName: \"kubernetes.io/projected/08c698b7-a3f8-4639-8237-a8e005ae2669-kube-api-access-xzt8n\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737490 4884 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737498 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqz5v\" (UniqueName: \"kubernetes.io/projected/c1e2007d-c536-47f5-9d03-92069c96f654-kube-api-access-gqz5v\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737507 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flds4\" (UniqueName: \"kubernetes.io/projected/7bc5134b-7bda-47e0-86a3-b4f374e842e6-kube-api-access-flds4\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737515 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737523 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.737531 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.738456 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.739076 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: W1128 15:45:17.739880 4884 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/c1e2007d-c536-47f5-9d03-92069c96f654/volumes/kubernetes.io~secret/combined-ca-bundle
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.739912 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1e2007d-c536-47f5-9d03-92069c96f654" (UID: "c1e2007d-c536-47f5-9d03-92069c96f654"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.740217 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6237eb73-294e-4e4b-a619-e669061a1b5b-logs" (OuterVolumeSpecName: "logs") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.740286 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.740313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.746674 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6237eb73-294e-4e4b-a619-e669061a1b5b-kube-api-access-f8m62" (OuterVolumeSpecName: "kube-api-access-f8m62") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "kube-api-access-f8m62". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.747003 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-scripts" (OuterVolumeSpecName: "scripts") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.750011 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kube-api-access-jtvc9" (OuterVolumeSpecName: "kube-api-access-jtvc9") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "kube-api-access-jtvc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.750130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "08c698b7-a3f8-4639-8237-a8e005ae2669" (UID: "08c698b7-a3f8-4639-8237-a8e005ae2669"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.754174 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-secrets" (OuterVolumeSpecName: "secrets") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.755403 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "mysql-db") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.763643 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-988fdb959-xkp66"
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.839319 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-combined-ca-bundle\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.839380 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-config-data\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.840675 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-etc-swift\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.840820 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-log-httpd\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.840852 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-internal-tls-certs\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.840873 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-public-tls-certs\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.840923 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-run-httpd\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.840949 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8778\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-kube-api-access-l8778\") pod \"f788ec00-6116-4f7f-ac08-21623599090d\" (UID: \"f788ec00-6116-4f7f-ac08-21623599090d\") "
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841508 4884 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/08c698b7-a3f8-4639-8237-a8e005ae2669-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841522 4884 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-secrets\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841532 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default\") on node \"crc\" DevicePath \"\""
\"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841549 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841558 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841567 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8m62\" (UniqueName: \"kubernetes.io/projected/6237eb73-294e-4e4b-a619-e669061a1b5b-kube-api-access-f8m62\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841576 4884 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841584 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtvc9\" (UniqueName: \"kubernetes.io/projected/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-kube-api-access-jtvc9\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841592 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841600 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841607 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6237eb73-294e-4e4b-a619-e669061a1b5b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.841618 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e2007d-c536-47f5-9d03-92069c96f654-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.842522 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.843921 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.881977 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.882358 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-central-agent" containerID="cri-o://5f7512ac40024faf5c002b3ea07aae281fd765dc79598f4d2e49a37be1071a24" gracePeriod=30 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.883034 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="proxy-httpd" containerID="cri-o://4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce" gracePeriod=30 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.883194 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="sg-core" containerID="cri-o://77143d790300f24e1ec71423bf2b5cceb1697eca359224ae98446bd1168afb55" gracePeriod=30 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.883267 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-notification-agent" containerID="cri-o://b0e0f537c987f444034b20d830933a5ee4e9b9836a56cfdf4c62f7d8b6d604d0" gracePeriod=30 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.899134 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.899339 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="585208a7-186b-40da-a7af-be303777e77c" containerName="kube-state-metrics" containerID="cri-o://b42cabfe2b0f40870c93167cc022af22181c4c8400a649d0268e8625b2010d96" gracePeriod=30 Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.913216 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.949273 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-kube-api-access-l8778" (OuterVolumeSpecName: "kube-api-access-l8778") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "kube-api-access-l8778". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.963184 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.963210 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f788ec00-6116-4f7f-ac08-21623599090d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.963219 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8778\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-kube-api-access-l8778\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:17 crc kubenswrapper[4884]: I1128 15:45:17.963231 4884 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f788ec00-6116-4f7f-ac08-21623599090d-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.014766 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.059578 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.059783 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="959ac7da-0d4b-48f3-84af-2650cd91c143" containerName="memcached" containerID="cri-o://42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e" gracePeriod=30 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.066241 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.093258 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-n5pb8"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.118033 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-n5pb8"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.133026 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-config-data" (OuterVolumeSpecName: "config-data") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.133564 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.138220 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-tj565"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.146218 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-tj565"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.151412 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7d44bc67d-rzq4r"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.151935 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-7d44bc67d-rzq4r" podUID="91405fb1-1a28-4fb4-9548-84c4b1797d45" containerName="keystone-api" containerID="cri-o://b935ab2ff92c4824683a68fa77cd7073eb24e434873a920819150e3598f5fb46" gracePeriod=30 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.168062 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.188057 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.188146 4884 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.196356 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-2mmr5"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.201827 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-2mmr5"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.208369 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-482c-account-create-cmhd8"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.209281 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c2dcae8-7c76-46e9-90d4-afd8af5f474a" (UID: "4c2dcae8-7c76-46e9-90d4-afd8af5f474a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.213416 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-482c-account-create-cmhd8"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.294363 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c2dcae8-7c76-46e9-90d4-afd8af5f474a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.314167 4884 generic.go:334] "Generic (PLEG): container finished" podID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" containerID="eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c" exitCode=0 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.314231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"fdd604f8-af2a-40bb-b85a-14d7a4eeb000","Type":"ContainerDied","Data":"eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.316626 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"4c2dcae8-7c76-46e9-90d4-afd8af5f474a","Type":"ContainerDied","Data":"044fe30c427646beca35b46432244b97d3f929afc22157e44f74964314cf5593"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.316676 4884 scope.go:117] "RemoveContainer" containerID="73bf92758339020175aea8e11f436696e330b710e4144aaee2fa67b45a5f9276" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.316741 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.319045 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-zfz9w"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.326436 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican783f-account-delete-x9hl4" event={"ID":"38ca29b2-4263-4cb5-ba00-fe95430cf7f6","Type":"ContainerDied","Data":"972be72ce9ccb798c52fc2ba5c705c2f155ef97dbb9da33bb02f30991d74dd47"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.326469 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="972be72ce9ccb798c52fc2ba5c705c2f155ef97dbb9da33bb02f30991d74dd47" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.327654 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell1c1f2-account-delete-lnkf6" event={"ID":"90481f2a-55d7-459f-9e46-2ca816951a8d","Type":"ContainerDied","Data":"76eb634b791382d403eee6f78ff2b9f31981550fecad35f6f1215b7e5bb4bdb2"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.327670 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76eb634b791382d403eee6f78ff2b9f31981550fecad35f6f1215b7e5bb4bdb2" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.328706 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-zfz9w"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.329705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7789c97d46-jmbnq" event={"ID":"6237eb73-294e-4e4b-a619-e669061a1b5b","Type":"ContainerDied","Data":"97d4a4efde61a9fefbf19dd65380e6b45dead8570ef82552146b687eb51f597f"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.329795 4884 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/placement-7789c97d46-jmbnq" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.343651 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0992b-account-delete-sfgzb"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.358981 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-992b-account-create-j5mtz"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.359956 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder0078-account-delete-nnnfh" event={"ID":"f633ca3e-03a6-4c24-9783-94fb61ed0ade","Type":"ContainerDied","Data":"c8fbde66354abcb01f69c9dcf5b9dcab98c4ce61f831b3f338d15547a08a55fc"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.360023 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8fbde66354abcb01f69c9dcf5b9dcab98c4ce61f831b3f338d15547a08a55fc" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.373350 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.388291 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-992b-account-create-j5mtz"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.393916 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-config-data" (OuterVolumeSpecName: "config-data") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.394938 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.394964 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.396575 4884 generic.go:334] "Generic (PLEG): container finished" podID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerID="77143d790300f24e1ec71423bf2b5cceb1697eca359224ae98446bd1168afb55" exitCode=2 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.396644 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerDied","Data":"77143d790300f24e1ec71423bf2b5cceb1697eca359224ae98446bd1168afb55"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.408395 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0e2713c-5c3d-457f-b444-122039f003d3" containerID="1dba7e621dc762e39efb79478b3479dc3c3f7b54537bead34f7ee2f18daf1ebc" exitCode=0 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.408473 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c0e2713c-5c3d-457f-b444-122039f003d3","Type":"ContainerDied","Data":"1dba7e621dc762e39efb79478b3479dc3c3f7b54537bead34f7ee2f18daf1ebc"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.411419 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-988fdb959-xkp66" event={"ID":"f788ec00-6116-4f7f-ac08-21623599090d","Type":"ContainerDied","Data":"b555ae7c0c5977cc1d9b60f0509852e6ae44111472e28406dc152276d3889743"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.411519 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.434244 4884 generic.go:334] "Generic (PLEG): container finished" podID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerID="3abb3a7245fdef87ef691fa6295e4f7c18af329b641de8d6a74c5a455959669f" exitCode=0 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.434350 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5c94e18c-15c4-4ef6-929f-c1941dbd3919","Type":"ContainerDied","Data":"3abb3a7245fdef87ef691fa6295e4f7c18af329b641de8d6a74c5a455959669f"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.449306 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.454002 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.480482 4884 generic.go:334] "Generic (PLEG): container finished" podID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerID="ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d" exitCode=0 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.480582 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5d1f5739-e8a4-4081-8104-57dfc250861a","Type":"ContainerDied","Data":"ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.494583 4884 generic.go:334] "Generic (PLEG): container finished" podID="585208a7-186b-40da-a7af-be303777e77c" containerID="b42cabfe2b0f40870c93167cc022af22181c4c8400a649d0268e8625b2010d96" exitCode=2 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.494671 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementeaff-account-delete-tgs7q" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.497138 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.497161 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.497359 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"585208a7-186b-40da-a7af-be303777e77c","Type":"ContainerDied","Data":"b42cabfe2b0f40870c93167cc022af22181c4c8400a649d0268e8625b2010d96"} Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.497436 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.498550 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.502260 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.532849 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f788ec00-6116-4f7f-ac08-21623599090d" (UID: "f788ec00-6116-4f7f-ac08-21623599090d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.539647 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerName="galera" containerID="cri-o://be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023" gracePeriod=30 Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.548546 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6237eb73-294e-4e4b-a619-e669061a1b5b" (UID: "6237eb73-294e-4e4b-a619-e669061a1b5b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.587985 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": read tcp 10.217.0.2:58850->10.217.0.202:8775: read: connection reset by peer" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.588019 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": read tcp 10.217.0.2:58836->10.217.0.202:8775: read: connection reset by peer" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.598240 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.598265 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f788ec00-6116-4f7f-ac08-21623599090d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.598276 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6237eb73-294e-4e4b-a619-e669061a1b5b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.699917 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91c5a29a_fbc0_4c91_a3a3_e4d96c911c98.slice/crio-4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91c5a29a_fbc0_4c91_a3a3_e4d96c911c98.slice/crio-conmon-4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb0b621b_d100_4ec9_b815_13e67489a2ac.slice/crio-5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d1f5739_e8a4_4081_8104_57dfc250861a.slice/crio-conmon-ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d.scope\": RecentStats: unable to find data in 
memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d1f5739_e8a4_4081_8104_57dfc250861a.slice/crio-ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod959ac7da_0d4b_48f3_84af_2650cd91c143.slice/crio-42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb0b621b_d100_4ec9_b815_13e67489a2ac.slice/crio-conmon-5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39e34f52_669e_4086_94ef_a38542dbc6ea.slice/crio-8807da65f797b04894447dbc2417ad0e7fe1821650be3b9a2879db50840e4859.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.722802 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.722983 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.725018 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.725627 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e723bad-9c0e-48e4-a08e-b95c84e15b81" path="/var/lib/kubelet/pods/0e723bad-9c0e-48e4-a08e-b95c84e15b81/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.725685 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.732418 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23bd4715-1fa9-48a3-8bc0-7d92059de47d" path="/var/lib/kubelet/pods/23bd4715-1fa9-48a3-8bc0-7d92059de47d/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.732828 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.732946 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.733223 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f6c7973-c2da-49c1-be51-6901ab4ba930" path="/var/lib/kubelet/pods/6f6c7973-c2da-49c1-be51-6901ab4ba930/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.733448 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:18 crc kubenswrapper[4884]: E1128 15:45:18.733548 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.733903 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="727e8f17-29bf-4c2f-b91b-f26b036f86f4" path="/var/lib/kubelet/pods/727e8f17-29bf-4c2f-b91b-f26b036f86f4/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.735137 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" path="/var/lib/kubelet/pods/769485f5-63dc-4d17-9bfb-3006d99e2616/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.736361 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6386f1f-20c4-4856-8171-064b53c9fa7f" path="/var/lib/kubelet/pods/a6386f1f-20c4-4856-8171-064b53c9fa7f/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.737079 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6c45f17-c6a2-4e48-8f4b-507043c949c0" path="/var/lib/kubelet/pods/b6c45f17-c6a2-4e48-8f4b-507043c949c0/volumes" Nov 28 15:45:18 crc kubenswrapper[4884]: I1128 15:45:18.753238 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tnsft" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" probeResult="failure" output=< Nov 28 15:45:18 crc kubenswrapper[4884]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Nov 28 15:45:18 crc kubenswrapper[4884]: > Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.217676 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.469237 4884 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.470342 4884 scope.go:117] "RemoveContainer" containerID="702d39f50ebfee6a7674542892d8e03d46e3c07866fffdb7282582d852c4e020" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.497419 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.501812 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.515240 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9xqs\" (UniqueName: \"kubernetes.io/projected/f633ca3e-03a6-4c24-9783-94fb61ed0ade-kube-api-access-k9xqs\") pod \"f633ca3e-03a6-4c24-9783-94fb61ed0ade\" (UID: \"f633ca3e-03a6-4c24-9783-94fb61ed0ade\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.515369 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7kcv\" (UniqueName: \"kubernetes.io/projected/38ca29b2-4263-4cb5-ba00-fe95430cf7f6-kube-api-access-v7kcv\") pod \"38ca29b2-4263-4cb5-ba00-fe95430cf7f6\" (UID: \"38ca29b2-4263-4cb5-ba00-fe95430cf7f6\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.515394 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvg4g\" (UniqueName: \"kubernetes.io/projected/90481f2a-55d7-459f-9e46-2ca816951a8d-kube-api-access-nvg4g\") pod \"90481f2a-55d7-459f-9e46-2ca816951a8d\" (UID: \"90481f2a-55d7-459f-9e46-2ca816951a8d\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.519978 4884 scope.go:117] "RemoveContainer" containerID="70ad5e352be838cd9b823be6fa572eb0d45de5ed204f4a0a7f67e0afe112af20" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.535430 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"585208a7-186b-40da-a7af-be303777e77c","Type":"ContainerDied","Data":"b64e8785dfef95508e56c17da6cc095bf7935164fe8821054cd246eaeccee998"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.535469 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b64e8785dfef95508e56c17da6cc095bf7935164fe8821054cd246eaeccee998" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.539069 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90481f2a-55d7-459f-9e46-2ca816951a8d-kube-api-access-nvg4g" (OuterVolumeSpecName: "kube-api-access-nvg4g") pod "90481f2a-55d7-459f-9e46-2ca816951a8d" (UID: "90481f2a-55d7-459f-9e46-2ca816951a8d"). InnerVolumeSpecName "kube-api-access-nvg4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.539835 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f633ca3e-03a6-4c24-9783-94fb61ed0ade-kube-api-access-k9xqs" (OuterVolumeSpecName: "kube-api-access-k9xqs") pod "f633ca3e-03a6-4c24-9783-94fb61ed0ade" (UID: "f633ca3e-03a6-4c24-9783-94fb61ed0ade"). InnerVolumeSpecName "kube-api-access-k9xqs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.542480 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0992b-account-delete-sfgzb" event={"ID":"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd","Type":"ContainerDied","Data":"8bbfb53aa56ee7bb23ef5c84e91ae49fe842f4d1ab68015ae4439c9cf6b8160c"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.542517 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bbfb53aa56ee7bb23ef5c84e91ae49fe842f4d1ab68015ae4439c9cf6b8160c" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.542624 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.545348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38ca29b2-4263-4cb5-ba00-fe95430cf7f6-kube-api-access-v7kcv" (OuterVolumeSpecName: "kube-api-access-v7kcv") pod "38ca29b2-4263-4cb5-ba00-fe95430cf7f6" (UID: "38ca29b2-4263-4cb5-ba00-fe95430cf7f6"). InnerVolumeSpecName "kube-api-access-v7kcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.553169 4884 generic.go:334] "Generic (PLEG): container finished" podID="49c9a390-4563-4e1e-a109-ff673e664409" containerID="6c624f23934a2757b9d0fda39e8ae22b7e58a88676450e0d3ad4c24f2ee510f7" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.553249 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69c66bbb4b-wzn9n" event={"ID":"49c9a390-4563-4e1e-a109-ff673e664409","Type":"ContainerDied","Data":"6c624f23934a2757b9d0fda39e8ae22b7e58a88676450e0d3ad4c24f2ee510f7"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.553274 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-69c66bbb4b-wzn9n" event={"ID":"49c9a390-4563-4e1e-a109-ff673e664409","Type":"ContainerDied","Data":"dbd261bf5e4f95b88bd84297cbca7b56b8ee21d4e5d6972fd71eec32ed161998"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.553285 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbd261bf5e4f95b88bd84297cbca7b56b8ee21d4e5d6972fd71eec32ed161998" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.556120 4884 scope.go:117] "RemoveContainer" containerID="4d34e7ecd7272753eb9a76334dacd796a0250216a60d30bc46bdd65b7be43497" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.557176 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.560062 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c0e2713c-5c3d-457f-b444-122039f003d3","Type":"ContainerDied","Data":"4b7fde603181a3eb51c9d691cdd1803e3e36e47baa1ea23dfd5d221381a3396a"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.560104 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b7fde603181a3eb51c9d691cdd1803e3e36e47baa1ea23dfd5d221381a3396a" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.570760 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.582936 4884 generic.go:334] "Generic (PLEG): container finished" podID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerID="5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.583025 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cb0b621b-d100-4ec9-b815-13e67489a2ac","Type":"ContainerDied","Data":"5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.583450 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"cb0b621b-d100-4ec9-b815-13e67489a2ac","Type":"ContainerDied","Data":"0293b7c161d530b750a3bd535b699eae0eb10652557e8f4a2376f96f300c168f"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.583468 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0293b7c161d530b750a3bd535b699eae0eb10652557e8f4a2376f96f300c168f" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.586602 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.590424 4884 generic.go:334] "Generic (PLEG): container finished" podID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerID="4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.607464 4884 generic.go:334] "Generic (PLEG): container finished" podID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerID="b0e0f537c987f444034b20d830933a5ee4e9b9836a56cfdf4c62f7d8b6d604d0" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.607581 4884 generic.go:334] "Generic (PLEG): container finished" podID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerID="5f7512ac40024faf5c002b3ea07aae281fd765dc79598f4d2e49a37be1071a24" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.590524 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerDied","Data":"4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.607806 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerDied","Data":"b0e0f537c987f444034b20d830933a5ee4e9b9836a56cfdf4c62f7d8b6d604d0"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.607829 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerDied","Data":"5f7512ac40024faf5c002b3ea07aae281fd765dc79598f4d2e49a37be1071a24"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.615927 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6949\" (UniqueName: \"kubernetes.io/projected/c0e2713c-5c3d-457f-b444-122039f003d3-kube-api-access-s6949\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.615962 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage05-crc\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.615983 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-scripts\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616000 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-public-tls-certs\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616019 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-config-data\") pod \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616034 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htm6x\" (UniqueName: \"kubernetes.io/projected/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-kube-api-access-htm6x\") pod \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616057 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-httpd-run\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616082 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616117 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-httpd-run\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c94e18c-15c4-4ef6-929f-c1941dbd3919-etc-machine-id\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616148 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-scripts\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616162 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-logs\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: 
\"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616183 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-public-tls-certs\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616200 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data-custom\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616216 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-config-data\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616236 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2ljx\" (UniqueName: \"kubernetes.io/projected/5c94e18c-15c4-4ef6-929f-c1941dbd3919-kube-api-access-w2ljx\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616257 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616272 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c94e18c-15c4-4ef6-929f-c1941dbd3919-logs\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616288 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-combined-ca-bundle\") pod \"c0e2713c-5c3d-457f-b444-122039f003d3\" (UID: \"c0e2713c-5c3d-457f-b444-122039f003d3\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-logs\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616344 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-config-data\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616368 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-internal-tls-certs\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 
15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616382 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-internal-tls-certs\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616404 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-combined-ca-bundle\") pod \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\" (UID: \"fdd604f8-af2a-40bb-b85a-14d7a4eeb000\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616428 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j26jz\" (UniqueName: \"kubernetes.io/projected/5d1f5739-e8a4-4081-8104-57dfc250861a-kube-api-access-j26jz\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616450 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-scripts\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616465 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-combined-ca-bundle\") pod \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\" (UID: \"5c94e18c-15c4-4ef6-929f-c1941dbd3919\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616479 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-combined-ca-bundle\") pod \"5d1f5739-e8a4-4081-8104-57dfc250861a\" (UID: \"5d1f5739-e8a4-4081-8104-57dfc250861a\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616695 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9xqs\" (UniqueName: \"kubernetes.io/projected/f633ca3e-03a6-4c24-9783-94fb61ed0ade-kube-api-access-k9xqs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616707 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7kcv\" (UniqueName: \"kubernetes.io/projected/38ca29b2-4263-4cb5-ba00-fe95430cf7f6-kube-api-access-v7kcv\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.616716 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvg4g\" (UniqueName: \"kubernetes.io/projected/90481f2a-55d7-459f-9e46-2ca816951a8d-kube-api-access-nvg4g\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.617888 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c94e18c-15c4-4ef6-929f-c1941dbd3919-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.618079 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.618726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-logs" (OuterVolumeSpecName: "logs") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.630106 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c94e18c-15c4-4ef6-929f-c1941dbd3919-logs" (OuterVolumeSpecName: "logs") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.630633 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.632399 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.633618 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d1f5739-e8a4-4081-8104-57dfc250861a-kube-api-access-j26jz" (OuterVolumeSpecName: "kube-api-access-j26jz") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "kube-api-access-j26jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.637454 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.639663 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.639883 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"fdd604f8-af2a-40bb-b85a-14d7a4eeb000","Type":"ContainerDied","Data":"94ff87db0fa96609c40771cfd3ba38660175958b52bbe20173bd45d00e84082f"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.642461 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-logs" (OuterVolumeSpecName: "logs") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.645624 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.648432 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5d1f5739-e8a4-4081-8104-57dfc250861a","Type":"ContainerDied","Data":"a3d365104e57088338695b8adb26a68005e2a0e7efc884213881152a54b62ee1"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.648454 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.651103 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.656249 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-kube-api-access-htm6x" (OuterVolumeSpecName: "kube-api-access-htm6x") pod "fdd604f8-af2a-40bb-b85a-14d7a4eeb000" (UID: "fdd604f8-af2a-40bb-b85a-14d7a4eeb000"). InnerVolumeSpecName "kube-api-access-htm6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.656342 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-scripts" (OuterVolumeSpecName: "scripts") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.656515 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.656550 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.657172 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0e2713c-5c3d-457f-b444-122039f003d3-kube-api-access-s6949" (OuterVolumeSpecName: "kube-api-access-s6949") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "kube-api-access-s6949". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.657635 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.658228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c94e18c-15c4-4ef6-929f-c1941dbd3919-kube-api-access-w2ljx" (OuterVolumeSpecName: "kube-api-access-w2ljx") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "kube-api-access-w2ljx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.663672 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.672262 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.682371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-scripts" (OuterVolumeSpecName: "scripts") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.682458 4884 scope.go:117] "RemoveContainer" containerID="ae366ef4151eca779ecbf163f7ffdccba3b64e846702b94d2bd3434c05e40d34" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.686892 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-7789c97d46-jmbnq"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.687124 4884 generic.go:334] "Generic (PLEG): container finished" podID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerID="02309734d390a1a60061c2de6a1ad950eea1ac1ad9073bbfc9869f4baf64d813" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.687180 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654d667c99-rmxwg" event={"ID":"afabf28f-eb82-4439-aa4f-3154e1007bf5","Type":"ContainerDied","Data":"02309734d390a1a60061c2de6a1ad950eea1ac1ad9073bbfc9869f4baf64d813"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.690031 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-scripts" (OuterVolumeSpecName: "scripts") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.693067 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.693581 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.697671 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5c94e18c-15c4-4ef6-929f-c1941dbd3919","Type":"ContainerDied","Data":"83827b6cc313c6873782279a18c96b5c020a7410f947b34200c5517f0d6476fe"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.697749 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.699434 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-7789c97d46-jmbnq"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.700354 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.700439 4884 generic.go:334] "Generic (PLEG): container finished" podID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerID="8807da65f797b04894447dbc2417ad0e7fe1821650be3b9a2879db50840e4859" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.700477 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"39e34f52-669e-4086-94ef-a38542dbc6ea","Type":"ContainerDied","Data":"8807da65f797b04894447dbc2417ad0e7fe1821650be3b9a2879db50840e4859"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.700493 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"39e34f52-669e-4086-94ef-a38542dbc6ea","Type":"ContainerDied","Data":"2f3a42a3d315efeeba3b0212b8f21e571b010b1cc5c084c7bfad5c095354006e"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.700503 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f3a42a3d315efeeba3b0212b8f21e571b010b1cc5c084c7bfad5c095354006e" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.704463 4884 generic.go:334] "Generic (PLEG): container finished" podID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerID="b5f34154cfef87b816d78795660796edfca1950b74e2cd3c82c08584957201a9" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.704509 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" event={"ID":"c29b378e-a008-4903-9ebd-2570d37d8a11","Type":"ContainerDied","Data":"b5f34154cfef87b816d78795660796edfca1950b74e2cd3c82c08584957201a9"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.709208 4884 generic.go:334] "Generic (PLEG): container finished" podID="959ac7da-0d4b-48f3-84af-2650cd91c143" containerID="42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e" exitCode=0 Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.709275 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell1c1f2-account-delete-lnkf6" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.712434 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.712522 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"959ac7da-0d4b-48f3-84af-2650cd91c143","Type":"ContainerDied","Data":"42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.712563 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"959ac7da-0d4b-48f3-84af-2650cd91c143","Type":"ContainerDied","Data":"b34e7186a5d0c35de269d77b36b38a5d3e968f3fd191a37b932c868decdb47b0"} Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.712771 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717005 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c9a390-4563-4e1e-a109-ff673e664409-logs\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-config-data\") pod \"cb0b621b-d100-4ec9-b815-13e67489a2ac\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717059 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb0b621b-d100-4ec9-b815-13e67489a2ac-logs\") pod \"cb0b621b-d100-4ec9-b815-13e67489a2ac\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717109 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data-custom\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717134 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rn4rk\" (UniqueName: \"kubernetes.io/projected/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd-kube-api-access-rn4rk\") pod \"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd\" (UID: \"c2e3f8ba-4fe2-433c-85d4-b30c488af3cd\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717152 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cb96\" (UniqueName: \"kubernetes.io/projected/49c9a390-4563-4e1e-a109-ff673e664409-kube-api-access-8cb96\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717195 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-combined-ca-bundle\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717214 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-internal-tls-certs\") pod \"cb0b621b-d100-4ec9-b815-13e67489a2ac\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717236 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-config-data\") pod \"39e34f52-669e-4086-94ef-a38542dbc6ea\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717257 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-config\") pod 
\"585208a7-186b-40da-a7af-be303777e77c\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717278 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-combined-ca-bundle\") pod \"959ac7da-0d4b-48f3-84af-2650cd91c143\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717294 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-nova-metadata-tls-certs\") pod \"39e34f52-669e-4086-94ef-a38542dbc6ea\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717317 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e34f52-669e-4086-94ef-a38542dbc6ea-logs\") pod \"39e34f52-669e-4086-94ef-a38542dbc6ea\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717333 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-kolla-config\") pod \"959ac7da-0d4b-48f3-84af-2650cd91c143\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717357 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25ht4\" (UniqueName: \"kubernetes.io/projected/959ac7da-0d4b-48f3-84af-2650cd91c143-kube-api-access-25ht4\") pod \"959ac7da-0d4b-48f3-84af-2650cd91c143\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717372 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnxwl\" (UniqueName: \"kubernetes.io/projected/cb0b621b-d100-4ec9-b815-13e67489a2ac-kube-api-access-qnxwl\") pod \"cb0b621b-d100-4ec9-b815-13e67489a2ac\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717390 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7cg2\" (UniqueName: \"kubernetes.io/projected/585208a7-186b-40da-a7af-be303777e77c-kube-api-access-r7cg2\") pod \"585208a7-186b-40da-a7af-be303777e77c\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717415 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717434 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-config-data\") pod \"959ac7da-0d4b-48f3-84af-2650cd91c143\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717452 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-memcached-tls-certs\") pod \"959ac7da-0d4b-48f3-84af-2650cd91c143\" (UID: \"959ac7da-0d4b-48f3-84af-2650cd91c143\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717467 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-combined-ca-bundle\") pod \"cb0b621b-d100-4ec9-b815-13e67489a2ac\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717482 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-combined-ca-bundle\") pod \"39e34f52-669e-4086-94ef-a38542dbc6ea\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717500 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-certs\") pod \"585208a7-186b-40da-a7af-be303777e77c\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717521 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-internal-tls-certs\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717545 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-public-tls-certs\") pod \"49c9a390-4563-4e1e-a109-ff673e664409\" (UID: \"49c9a390-4563-4e1e-a109-ff673e664409\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717563 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-public-tls-certs\") pod \"cb0b621b-d100-4ec9-b815-13e67489a2ac\" (UID: \"cb0b621b-d100-4ec9-b815-13e67489a2ac\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717578 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-combined-ca-bundle\") pod \"585208a7-186b-40da-a7af-be303777e77c\" (UID: \"585208a7-186b-40da-a7af-be303777e77c\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717621 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfqhd\" (UniqueName: \"kubernetes.io/projected/39e34f52-669e-4086-94ef-a38542dbc6ea-kube-api-access-hfqhd\") pod \"39e34f52-669e-4086-94ef-a38542dbc6ea\" (UID: \"39e34f52-669e-4086-94ef-a38542dbc6ea\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717875 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717887 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j26jz\" (UniqueName: 
\"kubernetes.io/projected/5d1f5739-e8a4-4081-8104-57dfc250861a-kube-api-access-j26jz\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717896 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717906 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6949\" (UniqueName: \"kubernetes.io/projected/c0e2713c-5c3d-457f-b444-122039f003d3-kube-api-access-s6949\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717922 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717931 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717939 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htm6x\" (UniqueName: \"kubernetes.io/projected/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-kube-api-access-htm6x\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717948 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717960 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717969 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5d1f5739-e8a4-4081-8104-57dfc250861a-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717978 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c94e18c-15c4-4ef6-929f-c1941dbd3919-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717986 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.717994 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0e2713c-5c3d-457f-b444-122039f003d3-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.718004 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.718012 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2ljx\" (UniqueName: \"kubernetes.io/projected/5c94e18c-15c4-4ef6-929f-c1941dbd3919-kube-api-access-w2ljx\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc 
kubenswrapper[4884]: I1128 15:45:19.718020 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c94e18c-15c4-4ef6-929f-c1941dbd3919-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.721394 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "959ac7da-0d4b-48f3-84af-2650cd91c143" (UID: "959ac7da-0d4b-48f3-84af-2650cd91c143"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.722266 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39e34f52-669e-4086-94ef-a38542dbc6ea-logs" (OuterVolumeSpecName: "logs") pod "39e34f52-669e-4086-94ef-a38542dbc6ea" (UID: "39e34f52-669e-4086-94ef-a38542dbc6ea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.730648 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb0b621b-d100-4ec9-b815-13e67489a2ac-logs" (OuterVolumeSpecName: "logs") pod "cb0b621b-d100-4ec9-b815-13e67489a2ac" (UID: "cb0b621b-d100-4ec9-b815-13e67489a2ac"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.738795 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-config-data" (OuterVolumeSpecName: "config-data") pod "959ac7da-0d4b-48f3-84af-2650cd91c143" (UID: "959ac7da-0d4b-48f3-84af-2650cd91c143"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.739161 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.739686 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.739726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49c9a390-4563-4e1e-a109-ff673e664409-logs" (OuterVolumeSpecName: "logs") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.754622 4884 scope.go:117] "RemoveContainer" containerID="c9fbb45580ab524d334d02b6207e2923abff684e828f592b260b02070af2c617" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.768204 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb0b621b-d100-4ec9-b815-13e67489a2ac-kube-api-access-qnxwl" (OuterVolumeSpecName: "kube-api-access-qnxwl") pod "cb0b621b-d100-4ec9-b815-13e67489a2ac" (UID: "cb0b621b-d100-4ec9-b815-13e67489a2ac"). InnerVolumeSpecName "kube-api-access-qnxwl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.771497 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/959ac7da-0d4b-48f3-84af-2650cd91c143-kube-api-access-25ht4" (OuterVolumeSpecName: "kube-api-access-25ht4") pod "959ac7da-0d4b-48f3-84af-2650cd91c143" (UID: "959ac7da-0d4b-48f3-84af-2650cd91c143"). InnerVolumeSpecName "kube-api-access-25ht4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.772454 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39e34f52-669e-4086-94ef-a38542dbc6ea-kube-api-access-hfqhd" (OuterVolumeSpecName: "kube-api-access-hfqhd") pod "39e34f52-669e-4086-94ef-a38542dbc6ea" (UID: "39e34f52-669e-4086-94ef-a38542dbc6ea"). InnerVolumeSpecName "kube-api-access-hfqhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.777606 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c9a390-4563-4e1e-a109-ff673e664409-kube-api-access-8cb96" (OuterVolumeSpecName: "kube-api-access-8cb96") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "kube-api-access-8cb96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.782377 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.782550 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd-kube-api-access-rn4rk" (OuterVolumeSpecName: "kube-api-access-rn4rk") pod "c2e3f8ba-4fe2-433c-85d4-b30c488af3cd" (UID: "c2e3f8ba-4fe2-433c-85d4-b30c488af3cd"). InnerVolumeSpecName "kube-api-access-rn4rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.782983 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.783924 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.784241 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementeaff-account-delete-tgs7q"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.785297 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/585208a7-186b-40da-a7af-be303777e77c-kube-api-access-r7cg2" (OuterVolumeSpecName: "kube-api-access-r7cg2") pod "585208a7-186b-40da-a7af-be303777e77c" (UID: "585208a7-186b-40da-a7af-be303777e77c"). InnerVolumeSpecName "kube-api-access-r7cg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.787945 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.808315 4884 scope.go:117] "RemoveContainer" containerID="eef21640f33775390cf080f05247adc8d777f8156f8175fa7cc4c284a30cc25c" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.821014 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementeaff-account-delete-tgs7q"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853746 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853769 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rn4rk\" (UniqueName: \"kubernetes.io/projected/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd-kube-api-access-rn4rk\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853779 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cb96\" (UniqueName: \"kubernetes.io/projected/49c9a390-4563-4e1e-a109-ff673e664409-kube-api-access-8cb96\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853787 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39e34f52-669e-4086-94ef-a38542dbc6ea-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853798 4884 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853807 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25ht4\" (UniqueName: \"kubernetes.io/projected/959ac7da-0d4b-48f3-84af-2650cd91c143-kube-api-access-25ht4\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853818 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnxwl\" (UniqueName: \"kubernetes.io/projected/cb0b621b-d100-4ec9-b815-13e67489a2ac-kube-api-access-qnxwl\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853826 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7cg2\" (UniqueName: \"kubernetes.io/projected/585208a7-186b-40da-a7af-be303777e77c-kube-api-access-r7cg2\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853834 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/959ac7da-0d4b-48f3-84af-2650cd91c143-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853843 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfqhd\" (UniqueName: \"kubernetes.io/projected/39e34f52-669e-4086-94ef-a38542dbc6ea-kube-api-access-hfqhd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853851 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49c9a390-4563-4e1e-a109-ff673e664409-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.853859 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/cb0b621b-d100-4ec9-b815-13e67489a2ac-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.854957 4884 scope.go:117] "RemoveContainer" containerID="ae68139dc724343bef752be8e1bfffeb9a53499255a3b5d1ed21470caa369d1d" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.909420 4884 scope.go:117] "RemoveContainer" containerID="ce2d368f9ec98e7e160e340cbf52ff57688a8c528c07d144e558a64b3ca00a72" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.911625 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.911964 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.920463 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-config-data" (OuterVolumeSpecName: "config-data") pod "39e34f52-669e-4086-94ef-a38542dbc6ea" (UID: "39e34f52-669e-4086-94ef-a38542dbc6ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.920359 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.922213 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.955937 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxpd2\" (UniqueName: \"kubernetes.io/projected/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-kube-api-access-xxpd2\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.956017 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gd2sd\" (UniqueName: \"kubernetes.io/projected/afabf28f-eb82-4439-aa4f-3154e1007bf5-kube-api-access-gd2sd\") pod \"afabf28f-eb82-4439-aa4f-3154e1007bf5\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.956135 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29b378e-a008-4903-9ebd-2570d37d8a11-logs\") pod \"c29b378e-a008-4903-9ebd-2570d37d8a11\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957223 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-sg-core-conf-yaml\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957300 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data-custom\") pod \"c29b378e-a008-4903-9ebd-2570d37d8a11\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957386 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afabf28f-eb82-4439-aa4f-3154e1007bf5-logs\") pod \"afabf28f-eb82-4439-aa4f-3154e1007bf5\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957447 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data\") pod \"afabf28f-eb82-4439-aa4f-3154e1007bf5\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957480 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-combined-ca-bundle\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957621 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-combined-ca-bundle\") pod \"afabf28f-eb82-4439-aa4f-3154e1007bf5\" (UID: \"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957644 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data-custom\") pod \"afabf28f-eb82-4439-aa4f-3154e1007bf5\" (UID: 
\"afabf28f-eb82-4439-aa4f-3154e1007bf5\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957680 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-ceilometer-tls-certs\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957705 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g7jl\" (UniqueName: \"kubernetes.io/projected/c29b378e-a008-4903-9ebd-2570d37d8a11-kube-api-access-4g7jl\") pod \"c29b378e-a008-4903-9ebd-2570d37d8a11\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957721 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data\") pod \"c29b378e-a008-4903-9ebd-2570d37d8a11\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957770 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-config-data\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957794 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-log-httpd\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957819 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-scripts\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957855 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-combined-ca-bundle\") pod \"c29b378e-a008-4903-9ebd-2570d37d8a11\" (UID: \"c29b378e-a008-4903-9ebd-2570d37d8a11\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.957878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-run-httpd\") pod \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\" (UID: \"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98\") " Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958234 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958255 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958264 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958273 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958631 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958931 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c29b378e-a008-4903-9ebd-2570d37d8a11-logs" (OuterVolumeSpecName: "logs") pod "c29b378e-a008-4903-9ebd-2570d37d8a11" (UID: "c29b378e-a008-4903-9ebd-2570d37d8a11"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.958986 4884 scope.go:117] "RemoveContainer" containerID="3abb3a7245fdef87ef691fa6295e4f7c18af329b641de8d6a74c5a455959669f" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.960997 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.961101 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afabf28f-eb82-4439-aa4f-3154e1007bf5-logs" (OuterVolumeSpecName: "logs") pod "afabf28f-eb82-4439-aa4f-3154e1007bf5" (UID: "afabf28f-eb82-4439-aa4f-3154e1007bf5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.989858 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-scripts" (OuterVolumeSpecName: "scripts") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.989948 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data" (OuterVolumeSpecName: "config-data") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.990211 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell1c1f2-account-delete-lnkf6"] Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.990070 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afabf28f-eb82-4439-aa4f-3154e1007bf5-kube-api-access-gd2sd" (OuterVolumeSpecName: "kube-api-access-gd2sd") pod "afabf28f-eb82-4439-aa4f-3154e1007bf5" (UID: "afabf28f-eb82-4439-aa4f-3154e1007bf5"). InnerVolumeSpecName "kube-api-access-gd2sd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.990149 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-kube-api-access-xxpd2" (OuterVolumeSpecName: "kube-api-access-xxpd2") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "kube-api-access-xxpd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.989848 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c29b378e-a008-4903-9ebd-2570d37d8a11" (UID: "c29b378e-a008-4903-9ebd-2570d37d8a11"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.990204 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "afabf28f-eb82-4439-aa4f-3154e1007bf5" (UID: "afabf28f-eb82-4439-aa4f-3154e1007bf5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.990242 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29b378e-a008-4903-9ebd-2570d37d8a11-kube-api-access-4g7jl" (OuterVolumeSpecName: "kube-api-access-4g7jl") pod "c29b378e-a008-4903-9ebd-2570d37d8a11" (UID: "c29b378e-a008-4903-9ebd-2570d37d8a11"). InnerVolumeSpecName "kube-api-access-4g7jl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.990889 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-config-data" (OuterVolumeSpecName: "config-data") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:19 crc kubenswrapper[4884]: I1128 15:45:19.997190 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell1c1f2-account-delete-lnkf6"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.035384 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb0b621b-d100-4ec9-b815-13e67489a2ac" (UID: "cb0b621b-d100-4ec9-b815-13e67489a2ac"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.036988 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "959ac7da-0d4b-48f3-84af-2650cd91c143" (UID: "959ac7da-0d4b-48f3-84af-2650cd91c143"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.041694 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c29b378e-a008-4903-9ebd-2570d37d8a11" (UID: "c29b378e-a008-4903-9ebd-2570d37d8a11"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061462 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061509 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061526 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061538 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061547 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061560 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxpd2\" (UniqueName: \"kubernetes.io/projected/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-kube-api-access-xxpd2\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061570 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gd2sd\" (UniqueName: \"kubernetes.io/projected/afabf28f-eb82-4439-aa4f-3154e1007bf5-kube-api-access-gd2sd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061580 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061592 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c29b378e-a008-4903-9ebd-2570d37d8a11-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061608 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061622 4884 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061634 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061646 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afabf28f-eb82-4439-aa4f-3154e1007bf5-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061656 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.061665 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g7jl\" (UniqueName: \"kubernetes.io/projected/c29b378e-a008-4903-9ebd-2570d37d8a11-kube-api-access-4g7jl\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.077992 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "585208a7-186b-40da-a7af-be303777e77c" (UID: "585208a7-186b-40da-a7af-be303777e77c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.086513 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.088765 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.092429 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-config-data" (OuterVolumeSpecName: "config-data") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.094776 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.106071 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.130944 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-config-data" (OuterVolumeSpecName: "config-data") pod "cb0b621b-d100-4ec9-b815-13e67489a2ac" (UID: "cb0b621b-d100-4ec9-b815-13e67489a2ac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.130954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5d1f5739-e8a4-4081-8104-57dfc250861a" (UID: "5d1f5739-e8a4-4081-8104-57dfc250861a"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.136019 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "959ac7da-0d4b-48f3-84af-2650cd91c143" (UID: "959ac7da-0d4b-48f3-84af-2650cd91c143"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.162970 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163028 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d1f5739-e8a4-4081-8104-57dfc250861a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163038 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163047 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163056 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163064 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163073 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163080 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.163108 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/959ac7da-0d4b-48f3-84af-2650cd91c143-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.164992 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fdd604f8-af2a-40bb-b85a-14d7a4eeb000" (UID: "fdd604f8-af2a-40bb-b85a-14d7a4eeb000"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.165715 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.166320 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39e34f52-669e-4086-94ef-a38542dbc6ea" (UID: "39e34f52-669e-4086-94ef-a38542dbc6ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.192259 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cb0b621b-d100-4ec9-b815-13e67489a2ac" (UID: "cb0b621b-d100-4ec9-b815-13e67489a2ac"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.193645 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "585208a7-186b-40da-a7af-be303777e77c" (UID: "585208a7-186b-40da-a7af-be303777e77c"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.194517 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "afabf28f-eb82-4439-aa4f-3154e1007bf5" (UID: "afabf28f-eb82-4439-aa4f-3154e1007bf5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.200767 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.201662 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cb0b621b-d100-4ec9-b815-13e67489a2ac" (UID: "cb0b621b-d100-4ec9-b815-13e67489a2ac"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.211178 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data" (OuterVolumeSpecName: "config-data") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.226351 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "585208a7-186b-40da-a7af-be303777e77c" (UID: "585208a7-186b-40da-a7af-be303777e77c"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.235676 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5c94e18c-15c4-4ef6-929f-c1941dbd3919" (UID: "5c94e18c-15c4-4ef6-929f-c1941dbd3919"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.238290 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-config-data" (OuterVolumeSpecName: "config-data") pod "fdd604f8-af2a-40bb-b85a-14d7a4eeb000" (UID: "fdd604f8-af2a-40bb-b85a-14d7a4eeb000"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.251546 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264174 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "39e34f52-669e-4086-94ef-a38542dbc6ea" (UID: "39e34f52-669e-4086-94ef-a38542dbc6ea"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264429 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264457 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264538 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264550 4884 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264561 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264571 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264581 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264592 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5c94e18c-15c4-4ef6-929f-c1941dbd3919-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264615 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdd604f8-af2a-40bb-b85a-14d7a4eeb000-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264626 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264638 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264649 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cb0b621b-d100-4ec9-b815-13e67489a2ac-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264659 4884 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/585208a7-186b-40da-a7af-be303777e77c-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.264670 4884 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/39e34f52-669e-4086-94ef-a38542dbc6ea-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.268434 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data" (OuterVolumeSpecName: "config-data") pod "afabf28f-eb82-4439-aa4f-3154e1007bf5" (UID: "afabf28f-eb82-4439-aa4f-3154e1007bf5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.280012 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c0e2713c-5c3d-457f-b444-122039f003d3" (UID: "c0e2713c-5c3d-457f-b444-122039f003d3"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.283649 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.284726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "49c9a390-4563-4e1e-a109-ff673e664409" (UID: "49c9a390-4563-4e1e-a109-ff673e664409"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.291441 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data" (OuterVolumeSpecName: "config-data") pod "c29b378e-a008-4903-9ebd-2570d37d8a11" (UID: "c29b378e-a008-4903-9ebd-2570d37d8a11"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.318284 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-config-data" (OuterVolumeSpecName: "config-data") pod "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" (UID: "91c5a29a-fbc0-4c91-a3a3-e4d96c911c98"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.367065 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afabf28f-eb82-4439-aa4f-3154e1007bf5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.367115 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.367146 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/49c9a390-4563-4e1e-a109-ff673e664409-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.367160 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29b378e-a008-4903-9ebd-2570d37d8a11-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.367171 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.367183 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0e2713c-5c3d-457f-b444-122039f003d3-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: E1128 15:45:20.367250 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:20 crc kubenswrapper[4884]: E1128 15:45:20.367305 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data podName:454fa1ac-19ca-4c44-b0fb-2c30039524a7 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:28.36728531 +0000 UTC m=+1567.930069111 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data") pod "rabbitmq-cell1-server-0" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7") : configmap "rabbitmq-cell1-config-data" not found Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.461345 4884 scope.go:117] "RemoveContainer" containerID="754a22031c7e79239a68f9b96363ce0d184e5432cae3b7213429811a1a369d36" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.479001 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.487837 4884 scope.go:117] "RemoveContainer" containerID="42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.493398 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.501309 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.508243 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.515575 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.521446 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.522973 4884 scope.go:117] "RemoveContainer" containerID="42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e" Nov 28 15:45:20 crc kubenswrapper[4884]: E1128 15:45:20.523527 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e\": container with ID starting with 42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e not found: ID does not exist" containerID="42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.523719 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e"} err="failed to get container status \"42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e\": rpc error: code = NotFound desc = could not find container \"42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e\": container with ID starting with 42130127dc2ba94bc72656bd8d93a01ffa0ecd2aba46468706b0b03bc8ce897e not found: ID does not exist" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.525413 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569168 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569213 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-secrets\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569273 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-default\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569321 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-galera-tls-certs\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569348 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-combined-ca-bundle\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569382 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-generated\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569414 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kolla-config\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569450 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-operator-scripts\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.569484 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjjl4\" (UniqueName: \"kubernetes.io/projected/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kube-api-access-sjjl4\") pod \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\" (UID: \"2b1d00ac-0efe-45af-9366-f5d302b86ccb\") " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.573289 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kube-api-access-sjjl4" (OuterVolumeSpecName: "kube-api-access-sjjl4") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" 
(UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "kube-api-access-sjjl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.577113 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.577823 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.578312 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.578687 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.581839 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-secrets" (OuterVolumeSpecName: "secrets") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.594363 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "mysql-db") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.594682 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.601920 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.602156 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.621965 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "2b1d00ac-0efe-45af-9366-f5d302b86ccb" (UID: "2b1d00ac-0efe-45af-9366-f5d302b86ccb"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.670704 4884 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.670738 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.670779 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.671226 4884 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.671245 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.671276 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjjl4\" (UniqueName: \"kubernetes.io/projected/2b1d00ac-0efe-45af-9366-f5d302b86ccb-kube-api-access-sjjl4\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.671352 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.671366 4884 reconciler_common.go:293] "Volume detached for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/2b1d00ac-0efe-45af-9366-f5d302b86ccb-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.671377 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2b1d00ac-0efe-45af-9366-f5d302b86ccb-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.688343 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.706811 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08c698b7-a3f8-4639-8237-a8e005ae2669" 
path="/var/lib/kubelet/pods/08c698b7-a3f8-4639-8237-a8e005ae2669/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.707495 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" path="/var/lib/kubelet/pods/4c2dcae8-7c76-46e9-90d4-afd8af5f474a/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.708262 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" path="/var/lib/kubelet/pods/5c94e18c-15c4-4ef6-929f-c1941dbd3919/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.728644 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" path="/var/lib/kubelet/pods/5d1f5739-e8a4-4081-8104-57dfc250861a/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.729596 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" path="/var/lib/kubelet/pods/6237eb73-294e-4e4b-a619-e669061a1b5b/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.730219 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bc5134b-7bda-47e0-86a3-b4f374e842e6" path="/var/lib/kubelet/pods/7bc5134b-7bda-47e0-86a3-b4f374e842e6/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.734784 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90481f2a-55d7-459f-9e46-2ca816951a8d" path="/var/lib/kubelet/pods/90481f2a-55d7-459f-9e46-2ca816951a8d/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.735443 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="959ac7da-0d4b-48f3-84af-2650cd91c143" path="/var/lib/kubelet/pods/959ac7da-0d4b-48f3-84af-2650cd91c143/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.735893 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1e2007d-c536-47f5-9d03-92069c96f654" path="/var/lib/kubelet/pods/c1e2007d-c536-47f5-9d03-92069c96f654/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.740922 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" path="/var/lib/kubelet/pods/fdd604f8-af2a-40bb-b85a-14d7a4eeb000/volumes" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.752587 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.762561 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-75d9f978b8-b8bls" event={"ID":"c29b378e-a008-4903-9ebd-2570d37d8a11","Type":"ContainerDied","Data":"d0afc34282eeb448655090d53a5d3b043c457c04224d33cd424ddbef17ae8b3d"} Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.762624 4884 scope.go:117] "RemoveContainer" containerID="b5f34154cfef87b816d78795660796edfca1950b74e2cd3c82c08584957201a9" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.773053 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.783354 4884 generic.go:334] "Generic (PLEG): container finished" podID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerID="9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c" exitCode=0 Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.783436 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5j947" event={"ID":"ebf7deee-37f0-4b47-8e10-3027e7009916","Type":"ContainerDied","Data":"9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c"} Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.786304 4884 generic.go:334] "Generic (PLEG): container finished" podID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerID="be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023" exitCode=0 Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.786355 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2b1d00ac-0efe-45af-9366-f5d302b86ccb","Type":"ContainerDied","Data":"be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023"} Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.786377 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2b1d00ac-0efe-45af-9366-f5d302b86ccb","Type":"ContainerDied","Data":"dbe078766fa53127c485bc5b4e73106eeb3828ea5c293808dcca23903df29a2f"} Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.786427 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.792992 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"91c5a29a-fbc0-4c91-a3a3-e4d96c911c98","Type":"ContainerDied","Data":"d523e316947963f98d42fc3f76f1e604a0f39747037e31c56ae19f0f8aa9da85"} Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.793384 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.837131 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654d667c99-rmxwg" event={"ID":"afabf28f-eb82-4439-aa4f-3154e1007bf5","Type":"ContainerDied","Data":"810f27cfb84ea05c5b82665c8896f872a37c09460e1fd5324761989fdf2c4ae4"} Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.837170 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-654d667c99-rmxwg" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.849151 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.850465 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican783f-account-delete-x9hl4" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.850784 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.850908 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-69c66bbb4b-wzn9n" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.850963 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.851265 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0992b-account-delete-sfgzb" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.852133 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.852139 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder0078-account-delete-nnnfh" Nov 28 15:45:20 crc kubenswrapper[4884]: I1128 15:45:20.893136 4884 scope.go:117] "RemoveContainer" containerID="55d3b2d4ca658ad8c1510b29e4414ddf3fb3b590d3f93d47c782a243e50ec135" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.039075 4884 scope.go:117] "RemoveContainer" containerID="be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.061521 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican783f-account-delete-x9hl4"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.070372 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican783f-account-delete-x9hl4"] Nov 28 15:45:21 crc kubenswrapper[4884]: E1128 15:45:21.076185 4884 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 15:45:21 crc kubenswrapper[4884]: E1128 15:45:21.076724 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data podName:3a5d81bd-3b99-4aa6-82dc-2969295dce39 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:29.076235753 +0000 UTC m=+1568.639019554 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data") pod "rabbitmq-server-0" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39") : configmap "rabbitmq-config-data" not found Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.077647 4884 scope.go:117] "RemoveContainer" containerID="e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.088864 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-654d667c99-rmxwg"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.096565 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-654d667c99-rmxwg"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.104205 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.121686 4884 scope.go:117] "RemoveContainer" containerID="be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023" Nov 28 15:45:21 crc kubenswrapper[4884]: E1128 15:45:21.122700 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023\": container with ID starting with be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023 not found: ID does not exist" containerID="be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.122750 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023"} err="failed to get container status \"be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023\": rpc error: code = NotFound desc = could not find container \"be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023\": container with ID starting with be03385071ed29cca7637c4394831b4cf43223122bd8d05fc36eec774054d023 not found: ID does not exist" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.122781 4884 scope.go:117] "RemoveContainer" containerID="e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7" Nov 28 15:45:21 crc kubenswrapper[4884]: E1128 15:45:21.123174 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7\": container with ID starting with e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7 not found: ID does not exist" containerID="e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.123209 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7"} err="failed to get container status \"e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7\": rpc error: code = NotFound desc = could not find container \"e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7\": container with ID starting with e79428e5f123cef01303250d1098e6138d46cbdb204c25dc43f44a4947df45e7 not found: ID does not exist" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.123230 4884 scope.go:117] "RemoveContainer" 
containerID="4ebb0b7c11475336e72dfd52a2583fdc2dff261b45ff906832624ad2e97541ce" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.125724 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.135286 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-69c66bbb4b-wzn9n"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.144370 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-69c66bbb4b-wzn9n"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.155570 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.164505 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.172568 4884 scope.go:117] "RemoveContainer" containerID="77143d790300f24e1ec71423bf2b5cceb1697eca359224ae98446bd1168afb55" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.174673 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.182021 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.193333 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0992b-account-delete-sfgzb"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.203647 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0992b-account-delete-sfgzb"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.204551 4884 scope.go:117] "RemoveContainer" containerID="b0e0f537c987f444034b20d830933a5ee4e9b9836a56cfdf4c62f7d8b6d604d0" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.211004 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder0078-account-delete-nnnfh"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.216916 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder0078-account-delete-nnnfh"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.221826 4884 scope.go:117] "RemoveContainer" containerID="5f7512ac40024faf5c002b3ea07aae281fd765dc79598f4d2e49a37be1071a24" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.223079 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-75d9f978b8-b8bls"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.228823 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-75d9f978b8-b8bls"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.237327 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.243000 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.243059 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.256894 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.261461 4884 scope.go:117] "RemoveContainer" containerID="02309734d390a1a60061c2de6a1ad950eea1ac1ad9073bbfc9869f4baf64d813" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.264977 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.269841 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.274847 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.280240 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.295577 4884 scope.go:117] "RemoveContainer" containerID="0ac907a0c0af6ead7b7e99a49821773ab7d22cc68e8accca85a5d26f4aca83ec" Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.882851 4884 generic.go:334] "Generic (PLEG): container finished" podID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerID="343440ad4aebfa33b7450af592a1090831143203f96732b5193c19160e303a6f" exitCode=0 Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.883083 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a5d81bd-3b99-4aa6-82dc-2969295dce39","Type":"ContainerDied","Data":"343440ad4aebfa33b7450af592a1090831143203f96732b5193c19160e303a6f"} Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.885147 4884 generic.go:334] "Generic (PLEG): container finished" podID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerID="5cea516e9ac81f0a76972ae4e518242f21039bb650a8fa7f56df976c1aec2cbc" exitCode=0 Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.885185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"454fa1ac-19ca-4c44-b0fb-2c30039524a7","Type":"ContainerDied","Data":"5cea516e9ac81f0a76972ae4e518242f21039bb650a8fa7f56df976c1aec2cbc"} Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.887071 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5j947" event={"ID":"ebf7deee-37f0-4b47-8e10-3027e7009916","Type":"ContainerStarted","Data":"91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6"} Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.889894 4884 generic.go:334] "Generic (PLEG): container finished" podID="91405fb1-1a28-4fb4-9548-84c4b1797d45" containerID="b935ab2ff92c4824683a68fa77cd7073eb24e434873a920819150e3598f5fb46" exitCode=0 Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.889934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d44bc67d-rzq4r" event={"ID":"91405fb1-1a28-4fb4-9548-84c4b1797d45","Type":"ContainerDied","Data":"b935ab2ff92c4824683a68fa77cd7073eb24e434873a920819150e3598f5fb46"} Nov 28 15:45:21 crc kubenswrapper[4884]: I1128 15:45:21.905711 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5j947" podStartSLOduration=4.574403785 podStartE2EDuration="8.905696012s" podCreationTimestamp="2025-11-28 15:45:13 +0000 UTC" 
firstStartedPulling="2025-11-28 15:45:17.212485564 +0000 UTC m=+1556.775269365" lastFinishedPulling="2025-11-28 15:45:21.543777761 +0000 UTC m=+1561.106561592" observedRunningTime="2025-11-28 15:45:21.903823055 +0000 UTC m=+1561.466606856" watchObservedRunningTime="2025-11-28 15:45:21.905696012 +0000 UTC m=+1561.468479813" Nov 28 15:45:21 crc kubenswrapper[4884]: E1128 15:45:21.978101 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:45:21 crc kubenswrapper[4884]: E1128 15:45:21.980918 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:45:22 crc kubenswrapper[4884]: E1128 15:45:22.028683 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 15:45:22 crc kubenswrapper[4884]: E1128 15:45:22.028733 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" containerName="nova-scheduler-scheduler" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.042230 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-credential-keys\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094612 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-combined-ca-bundle\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094648 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-internal-tls-certs\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094668 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094690 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-fernet-keys\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094712 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-config-data\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094734 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-scripts\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.094753 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6m6l\" (UniqueName: \"kubernetes.io/projected/91405fb1-1a28-4fb4-9548-84c4b1797d45-kube-api-access-j6m6l\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.099436 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91405fb1-1a28-4fb4-9548-84c4b1797d45-kube-api-access-j6m6l" (OuterVolumeSpecName: "kube-api-access-j6m6l") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "kube-api-access-j6m6l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.100302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-scripts" (OuterVolumeSpecName: "scripts") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.100806 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.100844 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.139711 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-67dd7bd66f-2ff2l" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.154:9696/\": dial tcp 10.217.0.154:9696: connect: connection refused" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.141308 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-config-data" (OuterVolumeSpecName: "config-data") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.142357 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: E1128 15:45:22.154327 4884 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 15:45:22 crc kubenswrapper[4884]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T15:45:15Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 15:45:22 crc kubenswrapper[4884]: /etc/init.d/functions: line 589: 424 Alarm clock "$@" Nov 28 15:45:22 crc kubenswrapper[4884]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-tnsft" message=< Nov 28 15:45:22 crc kubenswrapper[4884]: Exiting ovn-controller (1) [FAILED] Nov 28 15:45:22 crc kubenswrapper[4884]: Killing ovn-controller (1) [ OK ] Nov 28 15:45:22 crc kubenswrapper[4884]: Killing ovn-controller (1) with SIGKILL [ OK ] Nov 28 15:45:22 crc kubenswrapper[4884]: 2025-11-28T15:45:15Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 15:45:22 crc kubenswrapper[4884]: /etc/init.d/functions: line 589: 424 Alarm clock "$@" Nov 28 15:45:22 crc kubenswrapper[4884]: > Nov 28 15:45:22 crc kubenswrapper[4884]: E1128 15:45:22.154363 4884 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 15:45:22 crc kubenswrapper[4884]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T15:45:15Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 15:45:22 crc kubenswrapper[4884]: /etc/init.d/functions: line 589: 424 Alarm clock "$@" Nov 28 15:45:22 crc kubenswrapper[4884]: > pod="openstack/ovn-controller-tnsft" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" containerID="cri-o://526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.154405 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-tnsft" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" containerID="cri-o://526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554" gracePeriod=22 Nov 28 15:45:22 crc kubenswrapper[4884]: E1128 15:45:22.183288 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs podName:91405fb1-1a28-4fb4-9548-84c4b1797d45 nodeName:}" failed. No retries permitted until 2025-11-28 15:45:22.683259954 +0000 UTC m=+1562.246043755 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "public-tls-certs" (UniqueName: "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45") : error deleting /var/lib/kubelet/pods/91405fb1-1a28-4fb4-9548-84c4b1797d45/volume-subpaths: remove /var/lib/kubelet/pods/91405fb1-1a28-4fb4-9548-84c4b1797d45/volume-subpaths: no such file or directory Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.188256 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203752 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203788 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203800 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203811 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203825 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203837 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.203848 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6m6l\" (UniqueName: \"kubernetes.io/projected/91405fb1-1a28-4fb4-9548-84c4b1797d45-kube-api-access-j6m6l\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.287630 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.290174 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406210 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-plugins\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406255 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-confd\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406277 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-plugins-conf\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406296 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406316 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-erlang-cookie\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406342 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-plugins-conf\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406373 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-tls\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406393 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-tls\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406406 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406421 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-server-conf\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 
crc kubenswrapper[4884]: I1128 15:45:22.406445 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-erlang-cookie\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406459 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-confd\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406486 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/454fa1ac-19ca-4c44-b0fb-2c30039524a7-pod-info\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406529 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a5d81bd-3b99-4aa6-82dc-2969295dce39-pod-info\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406546 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st6wl\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-kube-api-access-st6wl\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406567 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/454fa1ac-19ca-4c44-b0fb-2c30039524a7-erlang-cookie-secret\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406584 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a5d81bd-3b99-4aa6-82dc-2969295dce39-erlang-cookie-secret\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406605 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406625 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-server-conf\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406651 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-plugins\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 
15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406673 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data\") pod \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\" (UID: \"3a5d81bd-3b99-4aa6-82dc-2969295dce39\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.406704 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhz99\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-kube-api-access-rhz99\") pod \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\" (UID: \"454fa1ac-19ca-4c44-b0fb-2c30039524a7\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.407699 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.408613 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.408623 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.408814 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.413508 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.416713 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.416857 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a5d81bd-3b99-4aa6-82dc-2969295dce39-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.416946 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "persistence") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.419044 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-kube-api-access-st6wl" (OuterVolumeSpecName: "kube-api-access-st6wl") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "kube-api-access-st6wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.419117 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.419266 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.426206 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.426309 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3a5d81bd-3b99-4aa6-82dc-2969295dce39-pod-info" (OuterVolumeSpecName: "pod-info") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.431330 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/454fa1ac-19ca-4c44-b0fb-2c30039524a7-pod-info" (OuterVolumeSpecName: "pod-info") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.432473 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/454fa1ac-19ca-4c44-b0fb-2c30039524a7-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.433978 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-kube-api-access-rhz99" (OuterVolumeSpecName: "kube-api-access-rhz99") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "kube-api-access-rhz99". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.460827 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-server-conf" (OuterVolumeSpecName: "server-conf") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.462983 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-server-conf" (OuterVolumeSpecName: "server-conf") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.478482 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data" (OuterVolumeSpecName: "config-data") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.486011 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data" (OuterVolumeSpecName: "config-data") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508724 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508762 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508774 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhz99\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-kube-api-access-rhz99\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508787 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508818 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508830 4884 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508842 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508854 4884 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508866 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508876 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508892 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508904 4884 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508918 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 
crc kubenswrapper[4884]: I1128 15:45:22.508929 4884 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/454fa1ac-19ca-4c44-b0fb-2c30039524a7-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508939 4884 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3a5d81bd-3b99-4aa6-82dc-2969295dce39-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508949 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st6wl\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-kube-api-access-st6wl\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508959 4884 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/454fa1ac-19ca-4c44-b0fb-2c30039524a7-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508969 4884 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3a5d81bd-3b99-4aa6-82dc-2969295dce39-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508979 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/454fa1ac-19ca-4c44-b0fb-2c30039524a7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.508989 4884 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3a5d81bd-3b99-4aa6-82dc-2969295dce39-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.541010 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "454fa1ac-19ca-4c44-b0fb-2c30039524a7" (UID: "454fa1ac-19ca-4c44-b0fb-2c30039524a7"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.541970 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-tnsft_f314d326-c20e-41cb-8fb5-a608d002b170/ovn-controller/0.log" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.542042 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnsft" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.545412 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.556676 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.575352 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3a5d81bd-3b99-4aa6-82dc-2969295dce39" (UID: "3a5d81bd-3b99-4aa6-82dc-2969295dce39"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.609632 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmqpn\" (UniqueName: \"kubernetes.io/projected/f314d326-c20e-41cb-8fb5-a608d002b170-kube-api-access-wmqpn\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.609719 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.609759 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-combined-ca-bundle\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.609885 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-ovn-controller-tls-certs\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.609937 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f314d326-c20e-41cb-8fb5-a608d002b170-scripts\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.609993 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-log-ovn\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.610052 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run-ovn\") pod \"f314d326-c20e-41cb-8fb5-a608d002b170\" (UID: \"f314d326-c20e-41cb-8fb5-a608d002b170\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.610389 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3a5d81bd-3b99-4aa6-82dc-2969295dce39-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.610405 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.610418 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.610430 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/454fa1ac-19ca-4c44-b0fb-2c30039524a7-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" 
Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.610493 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.611325 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.611325 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run" (OuterVolumeSpecName: "var-run") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.612336 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f314d326-c20e-41cb-8fb5-a608d002b170-scripts" (OuterVolumeSpecName: "scripts") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.613406 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f314d326-c20e-41cb-8fb5-a608d002b170-kube-api-access-wmqpn" (OuterVolumeSpecName: "kube-api-access-wmqpn") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "kube-api-access-wmqpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.637302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.673243 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "f314d326-c20e-41cb-8fb5-a608d002b170" (UID: "f314d326-c20e-41cb-8fb5-a608d002b170"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.699979 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" path="/var/lib/kubelet/pods/2b1d00ac-0efe-45af-9366-f5d302b86ccb/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.700831 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38ca29b2-4263-4cb5-ba00-fe95430cf7f6" path="/var/lib/kubelet/pods/38ca29b2-4263-4cb5-ba00-fe95430cf7f6/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.701536 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" path="/var/lib/kubelet/pods/39e34f52-669e-4086-94ef-a38542dbc6ea/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.702859 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c9a390-4563-4e1e-a109-ff673e664409" path="/var/lib/kubelet/pods/49c9a390-4563-4e1e-a109-ff673e664409/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.703576 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="585208a7-186b-40da-a7af-be303777e77c" path="/var/lib/kubelet/pods/585208a7-186b-40da-a7af-be303777e77c/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.704244 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" path="/var/lib/kubelet/pods/91c5a29a-fbc0-4c91-a3a3-e4d96c911c98/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.705716 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" path="/var/lib/kubelet/pods/afabf28f-eb82-4439-aa4f-3154e1007bf5/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.706531 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" path="/var/lib/kubelet/pods/c0e2713c-5c3d-457f-b444-122039f003d3/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.708010 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" path="/var/lib/kubelet/pods/c29b378e-a008-4903-9ebd-2570d37d8a11/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.708775 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2e3f8ba-4fe2-433c-85d4-b30c488af3cd" path="/var/lib/kubelet/pods/c2e3f8ba-4fe2-433c-85d4-b30c488af3cd/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.709455 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" path="/var/lib/kubelet/pods/cb0b621b-d100-4ec9-b815-13e67489a2ac/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.710675 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f633ca3e-03a6-4c24-9783-94fb61ed0ade" path="/var/lib/kubelet/pods/f633ca3e-03a6-4c24-9783-94fb61ed0ade/volumes" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.711420 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs\") pod \"91405fb1-1a28-4fb4-9548-84c4b1797d45\" (UID: \"91405fb1-1a28-4fb4-9548-84c4b1797d45\") " Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.714575 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "91405fb1-1a28-4fb4-9548-84c4b1797d45" (UID: "91405fb1-1a28-4fb4-9548-84c4b1797d45"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.722961 4884 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.722986 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.722998 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/f314d326-c20e-41cb-8fb5-a608d002b170-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.723007 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f314d326-c20e-41cb-8fb5-a608d002b170-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.723016 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/91405fb1-1a28-4fb4-9548-84c4b1797d45-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.723026 4884 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.723041 4884 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f314d326-c20e-41cb-8fb5-a608d002b170-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.723058 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmqpn\" (UniqueName: \"kubernetes.io/projected/f314d326-c20e-41cb-8fb5-a608d002b170-kube-api-access-wmqpn\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.910939 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3a5d81bd-3b99-4aa6-82dc-2969295dce39","Type":"ContainerDied","Data":"21d21e46f032bd54f79671f22bb01555ab1035acaacad9b170e3ed659f48d1fa"} Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.910986 4884 scope.go:117] "RemoveContainer" containerID="343440ad4aebfa33b7450af592a1090831143203f96732b5193c19160e303a6f" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.911144 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.915885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"454fa1ac-19ca-4c44-b0fb-2c30039524a7","Type":"ContainerDied","Data":"138c21efc8405d370794d7b08f6bec398024dcaba5158a73178620e924251f6e"} Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.915978 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.918492 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-tnsft_f314d326-c20e-41cb-8fb5-a608d002b170/ovn-controller/0.log" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.918535 4884 generic.go:334] "Generic (PLEG): container finished" podID="f314d326-c20e-41cb-8fb5-a608d002b170" containerID="526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554" exitCode=137 Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.918616 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnsft" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.918618 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft" event={"ID":"f314d326-c20e-41cb-8fb5-a608d002b170","Type":"ContainerDied","Data":"526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554"} Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.918662 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnsft" event={"ID":"f314d326-c20e-41cb-8fb5-a608d002b170","Type":"ContainerDied","Data":"3ac63c313a3776367e5a7d8ec66a5be09ebaed1a359805edbe89bf1de42ef881"} Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.921779 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7d44bc67d-rzq4r" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.922408 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7d44bc67d-rzq4r" event={"ID":"91405fb1-1a28-4fb4-9548-84c4b1797d45","Type":"ContainerDied","Data":"0d6f39598d566bf6fe9b24417092ac23d73694449aa875215bc03406fe1ff65a"} Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.931699 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.950630 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.971629 4884 scope.go:117] "RemoveContainer" containerID="261aa2bf3aa707f9ac09db8380136376399868fb0e2b7fcc555077a346a02c2f" Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.972929 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tnsft"] Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.980008 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-tnsft"] Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.989984 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:45:22 crc kubenswrapper[4884]: I1128 15:45:22.995872 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.007739 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7d44bc67d-rzq4r"] Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.018969 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-7d44bc67d-rzq4r"] Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.162773 4884 scope.go:117] "RemoveContainer" containerID="5cea516e9ac81f0a76972ae4e518242f21039bb650a8fa7f56df976c1aec2cbc" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.191083 4884 scope.go:117] "RemoveContainer" 
containerID="1f257c9ad37cf0a9ca938f7cf1d6a36a190fd223c5c58821c4e590d39987fda9" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.209263 4884 scope.go:117] "RemoveContainer" containerID="526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.248797 4884 scope.go:117] "RemoveContainer" containerID="526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554" Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.249260 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554\": container with ID starting with 526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554 not found: ID does not exist" containerID="526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.249302 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554"} err="failed to get container status \"526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554\": rpc error: code = NotFound desc = could not find container \"526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554\": container with ID starting with 526500aaa0277f7472d70a27831a0b7504bc120960a24d1489ad114bac280554 not found: ID does not exist" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.249327 4884 scope.go:117] "RemoveContainer" containerID="b935ab2ff92c4824683a68fa77cd7073eb24e434873a920819150e3598f5fb46" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.341076 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.430696 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-config-data\") pod \"0264205a-7b80-4df8-8d57-3923074f4a59\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.430992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsfq8\" (UniqueName: \"kubernetes.io/projected/0264205a-7b80-4df8-8d57-3923074f4a59-kube-api-access-fsfq8\") pod \"0264205a-7b80-4df8-8d57-3923074f4a59\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.431210 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-combined-ca-bundle\") pod \"0264205a-7b80-4df8-8d57-3923074f4a59\" (UID: \"0264205a-7b80-4df8-8d57-3923074f4a59\") " Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.434403 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0264205a-7b80-4df8-8d57-3923074f4a59-kube-api-access-fsfq8" (OuterVolumeSpecName: "kube-api-access-fsfq8") pod "0264205a-7b80-4df8-8d57-3923074f4a59" (UID: "0264205a-7b80-4df8-8d57-3923074f4a59"). InnerVolumeSpecName "kube-api-access-fsfq8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.460465 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-config-data" (OuterVolumeSpecName: "config-data") pod "0264205a-7b80-4df8-8d57-3923074f4a59" (UID: "0264205a-7b80-4df8-8d57-3923074f4a59"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.461725 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0264205a-7b80-4df8-8d57-3923074f4a59" (UID: "0264205a-7b80-4df8-8d57-3923074f4a59"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.532450 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.532477 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0264205a-7b80-4df8-8d57-3923074f4a59-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.532487 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsfq8\" (UniqueName: \"kubernetes.io/projected/0264205a-7b80-4df8-8d57-3923074f4a59-kube-api-access-fsfq8\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.714071 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.714499 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.714865 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.714892 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" 
podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.715296 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.716606 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.717809 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.717839 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.934868 4884 generic.go:334] "Generic (PLEG): container finished" podID="0264205a-7b80-4df8-8d57-3923074f4a59" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" exitCode=0 Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.934918 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0264205a-7b80-4df8-8d57-3923074f4a59","Type":"ContainerDied","Data":"b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588"} Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.934942 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0264205a-7b80-4df8-8d57-3923074f4a59","Type":"ContainerDied","Data":"bdd402fc4cecc3bbf5078c050af471ca791f473998a04a5f22d577081ea0d273"} Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.934958 4884 scope.go:117] "RemoveContainer" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.935043 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.971241 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.975525 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.988492 4884 scope.go:117] "RemoveContainer" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" Nov 28 15:45:23 crc kubenswrapper[4884]: E1128 15:45:23.990123 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588\": container with ID starting with b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588 not found: ID does not exist" containerID="b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588" Nov 28 15:45:23 crc kubenswrapper[4884]: I1128 15:45:23.990189 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588"} err="failed to get container status \"b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588\": rpc error: code = NotFound desc = could not find container \"b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588\": container with ID starting with b428639346c1037adf986906fd711c31d02cf99e9259b57dc0bb07db6ec86588 not found: ID does not exist" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.589537 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-69c66bbb4b-wzn9n" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.160:9311/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.589621 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-69c66bbb4b-wzn9n" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.160:9311/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.707511 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" path="/var/lib/kubelet/pods/0264205a-7b80-4df8-8d57-3923074f4a59/volumes" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.709247 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" path="/var/lib/kubelet/pods/3a5d81bd-3b99-4aa6-82dc-2969295dce39/volumes" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.710688 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" path="/var/lib/kubelet/pods/454fa1ac-19ca-4c44-b0fb-2c30039524a7/volumes" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.712894 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91405fb1-1a28-4fb4-9548-84c4b1797d45" path="/var/lib/kubelet/pods/91405fb1-1a28-4fb4-9548-84c4b1797d45/volumes" Nov 28 15:45:24 crc kubenswrapper[4884]: I1128 15:45:24.714419 4884 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" path="/var/lib/kubelet/pods/f314d326-c20e-41cb-8fb5-a608d002b170/volumes" Nov 28 15:45:25 crc kubenswrapper[4884]: I1128 15:45:25.341130 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:25 crc kubenswrapper[4884]: I1128 15:45:25.341201 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:25 crc kubenswrapper[4884]: I1128 15:45:25.394172 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5j947" Nov 28 15:45:25 crc kubenswrapper[4884]: I1128 15:45:25.985626 4884 generic.go:334] "Generic (PLEG): container finished" podID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerID="f5481c335b4d3298f0e98cb830ee1e1cade042a57387db9cfad9455b405cd581" exitCode=0 Nov 28 15:45:25 crc kubenswrapper[4884]: I1128 15:45:25.985959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dd7bd66f-2ff2l" event={"ID":"458e9b5e-8446-4bfa-ba33-12a3a32c74ea","Type":"ContainerDied","Data":"f5481c335b4d3298f0e98cb830ee1e1cade042a57387db9cfad9455b405cd581"} Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.281014 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.401754 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-public-tls-certs\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.401837 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-ovndb-tls-certs\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.401861 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-internal-tls-certs\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.401932 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-config\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.401992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct6rx\" (UniqueName: \"kubernetes.io/projected/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-kube-api-access-ct6rx\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.402015 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-httpd-config\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: 
\"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.402142 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-combined-ca-bundle\") pod \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\" (UID: \"458e9b5e-8446-4bfa-ba33-12a3a32c74ea\") " Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.420547 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.420646 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-kube-api-access-ct6rx" (OuterVolumeSpecName: "kube-api-access-ct6rx") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "kube-api-access-ct6rx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.444026 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.449246 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.454721 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.455830 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-config" (OuterVolumeSpecName: "config") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.474428 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "458e9b5e-8446-4bfa-ba33-12a3a32c74ea" (UID: "458e9b5e-8446-4bfa-ba33-12a3a32c74ea"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.503895 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.503937 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.503948 4884 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.503960 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.503973 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.503986 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct6rx\" (UniqueName: \"kubernetes.io/projected/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-kube-api-access-ct6rx\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:26 crc kubenswrapper[4884]: I1128 15:45:26.504002 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/458e9b5e-8446-4bfa-ba33-12a3a32c74ea-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:27 crc kubenswrapper[4884]: I1128 15:45:26.999633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-67dd7bd66f-2ff2l" event={"ID":"458e9b5e-8446-4bfa-ba33-12a3a32c74ea","Type":"ContainerDied","Data":"7cc9183f634ad8d78ec2fd6ec92a6c26b6b223f306419a67456fcc01d8397755"} Nov 28 15:45:27 crc kubenswrapper[4884]: I1128 15:45:27.000031 4884 scope.go:117] "RemoveContainer" containerID="6058d7f72df016355acac3c48d8e08bd8294db35efcb91a326adb1708153048c" Nov 28 15:45:27 crc kubenswrapper[4884]: I1128 15:45:26.999684 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-67dd7bd66f-2ff2l" Nov 28 15:45:27 crc kubenswrapper[4884]: I1128 15:45:27.035515 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-67dd7bd66f-2ff2l"] Nov 28 15:45:27 crc kubenswrapper[4884]: I1128 15:45:27.037218 4884 scope.go:117] "RemoveContainer" containerID="f5481c335b4d3298f0e98cb830ee1e1cade042a57387db9cfad9455b405cd581" Nov 28 15:45:27 crc kubenswrapper[4884]: I1128 15:45:27.042390 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-67dd7bd66f-2ff2l"] Nov 28 15:45:28 crc kubenswrapper[4884]: I1128 15:45:28.697986 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" path="/var/lib/kubelet/pods/458e9b5e-8446-4bfa-ba33-12a3a32c74ea/volumes" Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.716856 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.717204 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.721223 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.721354 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.721389 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.722972 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 
15:45:28.727447 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:28 crc kubenswrapper[4884]: E1128 15:45:28.727520 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd"
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.714384 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.715691 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.715847 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.716185 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.716236 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server"
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.717953 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.722124 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:33 crc kubenswrapper[4884]: E1128 15:45:33.722221 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd"
Nov 28 15:45:35 crc kubenswrapper[4884]: I1128 15:45:35.387505 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5j947"
Nov 28 15:45:35 crc kubenswrapper[4884]: I1128 15:45:35.449973 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5j947"]
Nov 28 15:45:36 crc kubenswrapper[4884]: I1128 15:45:36.104316 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5j947" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="registry-server" containerID="cri-o://91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6" gracePeriod=2
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.111077 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5j947"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.114889 4884 generic.go:334] "Generic (PLEG): container finished" podID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerID="91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6" exitCode=0
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.114950 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5j947" event={"ID":"ebf7deee-37f0-4b47-8e10-3027e7009916","Type":"ContainerDied","Data":"91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6"}
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.114977 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5j947" event={"ID":"ebf7deee-37f0-4b47-8e10-3027e7009916","Type":"ContainerDied","Data":"8c0e43646c29184c6b847236372ebf4db7aaa9a8bf7767ab6f834a471f9bba70"}
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.114997 4884 scope.go:117] "RemoveContainer" containerID="91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.115130 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5j947"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.141292 4884 scope.go:117] "RemoveContainer" containerID="9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.164855 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-catalog-content\") pod \"ebf7deee-37f0-4b47-8e10-3027e7009916\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") "
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.165011 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffpzn\" (UniqueName: \"kubernetes.io/projected/ebf7deee-37f0-4b47-8e10-3027e7009916-kube-api-access-ffpzn\") pod \"ebf7deee-37f0-4b47-8e10-3027e7009916\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") "
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.165034 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-utilities\") pod \"ebf7deee-37f0-4b47-8e10-3027e7009916\" (UID: \"ebf7deee-37f0-4b47-8e10-3027e7009916\") "
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.166348 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-utilities" (OuterVolumeSpecName: "utilities") pod "ebf7deee-37f0-4b47-8e10-3027e7009916" (UID: "ebf7deee-37f0-4b47-8e10-3027e7009916"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.177642 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebf7deee-37f0-4b47-8e10-3027e7009916-kube-api-access-ffpzn" (OuterVolumeSpecName: "kube-api-access-ffpzn") pod "ebf7deee-37f0-4b47-8e10-3027e7009916" (UID: "ebf7deee-37f0-4b47-8e10-3027e7009916"). InnerVolumeSpecName "kube-api-access-ffpzn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.177716 4884 scope.go:117] "RemoveContainer" containerID="532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.217512 4884 scope.go:117] "RemoveContainer" containerID="91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6"
Nov 28 15:45:37 crc kubenswrapper[4884]: E1128 15:45:37.218978 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6\": container with ID starting with 91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6 not found: ID does not exist" containerID="91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.219005 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6"} err="failed to get container status \"91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6\": rpc error: code = NotFound desc = could not find container \"91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6\": container with ID starting with 91670569e27c0a92bab46c951112f0be4c8c3d08dc8ca90b160dbd08eef6d4b6 not found: ID does not exist"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.219024 4884 scope.go:117] "RemoveContainer" containerID="9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c"
Nov 28 15:45:37 crc kubenswrapper[4884]: E1128 15:45:37.219449 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c\": container with ID starting with 9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c not found: ID does not exist" containerID="9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.219471 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c"} err="failed to get container status \"9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c\": rpc error: code = NotFound desc = could not find container \"9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c\": container with ID starting with 9caf6e381d3590ad58a4e6538da3980c9ea5c66beac93672238273336a5c156c not found: ID does not exist"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.219483 4884 scope.go:117] "RemoveContainer" containerID="532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df"
Nov 28 15:45:37 crc kubenswrapper[4884]: E1128 15:45:37.219966 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df\": container with ID starting with 532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df not found: ID does not exist" containerID="532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.219988 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df"} err="failed to get container status \"532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df\": rpc error: code = NotFound desc = could not find container \"532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df\": container with ID starting with 532bc05feb3bba15299e3bcbd085a4a8f5310bef514bf9d55a788d94db8bd6df not found: ID does not exist"
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.235812 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ebf7deee-37f0-4b47-8e10-3027e7009916" (UID: "ebf7deee-37f0-4b47-8e10-3027e7009916"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.266821 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.266848 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffpzn\" (UniqueName: \"kubernetes.io/projected/ebf7deee-37f0-4b47-8e10-3027e7009916-kube-api-access-ffpzn\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.266862 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ebf7deee-37f0-4b47-8e10-3027e7009916-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.468250 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5j947"]
Nov 28 15:45:37 crc kubenswrapper[4884]: I1128 15:45:37.475532 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5j947"]
Nov 28 15:45:38 crc kubenswrapper[4884]: I1128 15:45:38.701681 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" path="/var/lib/kubelet/pods/ebf7deee-37f0-4b47-8e10-3027e7009916/volumes"
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.715259 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.716155 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.716650 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.716728 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server"
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.717891 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.720739 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.722916 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:38 crc kubenswrapper[4884]: E1128 15:45:38.722978 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd"
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.714915 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.716460 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.716484 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.717410 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.717469 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server"
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.718234 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.719723 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 15:45:43 crc kubenswrapper[4884]: E1128 15:45:43.719807 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-vm8q9" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd"
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.644267 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.690301 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-lock\") pod \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.690770 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-lock" (OuterVolumeSpecName: "lock") pod "772cd3d6-daa4-4494-9ce8-6182e011fbc4" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.690970 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-cache\") pod \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.691027 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") pod \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.691054 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs4vn\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-kube-api-access-rs4vn\") pod \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.691077 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\" (UID: \"772cd3d6-daa4-4494-9ce8-6182e011fbc4\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.691325 4884 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-lock\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.692197 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-cache" (OuterVolumeSpecName: "cache") pod "772cd3d6-daa4-4494-9ce8-6182e011fbc4" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.696207 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "swift") pod "772cd3d6-daa4-4494-9ce8-6182e011fbc4" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.696279 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "772cd3d6-daa4-4494-9ce8-6182e011fbc4" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.696344 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-kube-api-access-rs4vn" (OuterVolumeSpecName: "kube-api-access-rs4vn") pod "772cd3d6-daa4-4494-9ce8-6182e011fbc4" (UID: "772cd3d6-daa4-4494-9ce8-6182e011fbc4"). InnerVolumeSpecName "kube-api-access-rs4vn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.792300 4884 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/772cd3d6-daa4-4494-9ce8-6182e011fbc4-cache\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.792331 4884 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.792343 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rs4vn\" (UniqueName: \"kubernetes.io/projected/772cd3d6-daa4-4494-9ce8-6182e011fbc4-kube-api-access-rs4vn\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.792369 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.808890 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc"
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.879318 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vm8q9_ce9413d4-1548-44f6-a50d-dcae9284f674/ovs-vswitchd/0.log"
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.880048 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vm8q9"
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.893747 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995020 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-lib\") pod \"ce9413d4-1548-44f6-a50d-dcae9284f674\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995141 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-run\") pod \"ce9413d4-1548-44f6-a50d-dcae9284f674\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995189 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-etc-ovs\") pod \"ce9413d4-1548-44f6-a50d-dcae9284f674\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995229 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce9413d4-1548-44f6-a50d-dcae9284f674-scripts\") pod \"ce9413d4-1548-44f6-a50d-dcae9284f674\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995237 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-lib" (OuterVolumeSpecName: "var-lib") pod "ce9413d4-1548-44f6-a50d-dcae9284f674" (UID: "ce9413d4-1548-44f6-a50d-dcae9284f674"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995296 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "ce9413d4-1548-44f6-a50d-dcae9284f674" (UID: "ce9413d4-1548-44f6-a50d-dcae9284f674"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995296 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-run" (OuterVolumeSpecName: "var-run") pod "ce9413d4-1548-44f6-a50d-dcae9284f674" (UID: "ce9413d4-1548-44f6-a50d-dcae9284f674"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995341 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-log\") pod \"ce9413d4-1548-44f6-a50d-dcae9284f674\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995389 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-log" (OuterVolumeSpecName: "var-log") pod "ce9413d4-1548-44f6-a50d-dcae9284f674" (UID: "ce9413d4-1548-44f6-a50d-dcae9284f674"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.995434 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xjfb\" (UniqueName: \"kubernetes.io/projected/ce9413d4-1548-44f6-a50d-dcae9284f674-kube-api-access-8xjfb\") pod \"ce9413d4-1548-44f6-a50d-dcae9284f674\" (UID: \"ce9413d4-1548-44f6-a50d-dcae9284f674\") "
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.996036 4884 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-lib\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.996063 4884 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-run\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.996075 4884 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-etc-ovs\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.996102 4884 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ce9413d4-1548-44f6-a50d-dcae9284f674-var-log\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.996390 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9413d4-1548-44f6-a50d-dcae9284f674-scripts" (OuterVolumeSpecName: "scripts") pod "ce9413d4-1548-44f6-a50d-dcae9284f674" (UID: "ce9413d4-1548-44f6-a50d-dcae9284f674"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:45:44 crc kubenswrapper[4884]: I1128 15:45:44.998882 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce9413d4-1548-44f6-a50d-dcae9284f674-kube-api-access-8xjfb" (OuterVolumeSpecName: "kube-api-access-8xjfb") pod "ce9413d4-1548-44f6-a50d-dcae9284f674" (UID: "ce9413d4-1548-44f6-a50d-dcae9284f674"). InnerVolumeSpecName "kube-api-access-8xjfb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.097904 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xjfb\" (UniqueName: \"kubernetes.io/projected/ce9413d4-1548-44f6-a50d-dcae9284f674-kube-api-access-8xjfb\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.097956 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce9413d4-1548-44f6-a50d-dcae9284f674-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.233419 4884 generic.go:334] "Generic (PLEG): container finished" podID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerID="f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f" exitCode=137
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.233626 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.233653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f"}
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.234086 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"772cd3d6-daa4-4494-9ce8-6182e011fbc4","Type":"ContainerDied","Data":"504b2bb90da5e9230841c5ff12d642bce3a47453c25abfccf8fcd39258473aee"}
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.234163 4884 scope.go:117] "RemoveContainer" containerID="f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.238168 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-vm8q9_ce9413d4-1548-44f6-a50d-dcae9284f674/ovs-vswitchd/0.log"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.239509 4884 generic.go:334] "Generic (PLEG): container finished" podID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f" exitCode=137
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.239585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerDied","Data":"76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f"}
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.239648 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-vm8q9" event={"ID":"ce9413d4-1548-44f6-a50d-dcae9284f674","Type":"ContainerDied","Data":"42dd396c596ca2375cb58db1a0cae311ef33b9931da3c9fa6a0d428e8aff4c66"}
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.239761 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-vm8q9"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.271237 4884 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podc33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podc33f5fde-1bf4-406f-ad32-a1ed3ee3db9f] : Timed out while waiting for systemd to remove kubepods-besteffort-podc33f5fde_1bf4_406f_ad32_a1ed3ee3db9f.slice"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.271299 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podc33f5fde-1bf4-406f-ad32-a1ed3ee3db9f] : unable to destroy cgroup paths for cgroup [kubepods besteffort podc33f5fde-1bf4-406f-ad32-a1ed3ee3db9f] : Timed out while waiting for systemd to remove kubepods-besteffort-podc33f5fde_1bf4_406f_ad32_a1ed3ee3db9f.slice" pod="openstack/ovsdbserver-nb-0" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.277314 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.281070 4884 scope.go:117] "RemoveContainer" containerID="a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.288324 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.295996 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-vm8q9"]
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.297260 4884 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod6cdac902-b0e5-4f41-923c-07241207d730"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod6cdac902-b0e5-4f41-923c-07241207d730] : Timed out while waiting for systemd to remove kubepods-besteffort-pod6cdac902_b0e5_4f41_923c_07241207d730.slice"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.303320 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-vm8q9"]
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.304516 4884 scope.go:117] "RemoveContainer" containerID="bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.325038 4884 scope.go:117] "RemoveContainer" containerID="742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.343520 4884 scope.go:117] "RemoveContainer" containerID="b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.363041 4884 scope.go:117] "RemoveContainer" containerID="e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.387765 4884 scope.go:117] "RemoveContainer" containerID="6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.415387 4884 scope.go:117] "RemoveContainer" containerID="abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.435639 4884 scope.go:117] "RemoveContainer" containerID="70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.465407 4884 scope.go:117] "RemoveContainer" containerID="644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.483378 4884 scope.go:117] "RemoveContainer" containerID="88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.502451 4884 scope.go:117] "RemoveContainer" containerID="6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.526240 4884 scope.go:117] "RemoveContainer" containerID="e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.553374 4884 scope.go:117] "RemoveContainer" containerID="0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.580588 4884 scope.go:117] "RemoveContainer" containerID="a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.607879 4884 scope.go:117] "RemoveContainer" containerID="f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.608755 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f\": container with ID starting with f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f not found: ID does not exist" containerID="f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.608823 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f"} err="failed to get container status \"f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f\": rpc error: code = NotFound desc = could not find container \"f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f\": container with ID starting with f9e44ba3a1e879848a589080fb92632f8d1878f1865a84b435e4a8ead543b92f not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.608865 4884 scope.go:117] "RemoveContainer" containerID="a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.609456 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c\": container with ID starting with a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c not found: ID does not exist" containerID="a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.609517 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c"} err="failed to get container status \"a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c\": rpc error: code = NotFound desc = could not find container \"a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c\": container with ID starting with a1d6f985c32764da34b785e30f2acf14507bf697b6e798eae5fa3254aa04c60c not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.609553 4884 scope.go:117] "RemoveContainer" containerID="bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.609951 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe\": container with ID starting with bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe not found: ID does not exist" containerID="bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.610001 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe"} err="failed to get container status \"bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe\": rpc error: code = NotFound desc = could not find container \"bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe\": container with ID starting with bcc615161c955567d8fbe9c7db4ecac30675b70bb9f42fdc0fad93e063a06cfe not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.610030 4884 scope.go:117] "RemoveContainer" containerID="742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.610409 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff\": container with ID starting with 742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff not found: ID does not exist" containerID="742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.610448 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff"} err="failed to get container status \"742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff\": rpc error: code = NotFound desc = could not find container \"742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff\": container with ID starting with 742cc9edfeb8ab263aae0d7aebd8e217a0d9d9f3d99eb8541186a34de5aecfff not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.610479 4884 scope.go:117] "RemoveContainer" containerID="b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.610830 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5\": container with ID starting with b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5 not found: ID does not exist" containerID="b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.610867 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5"} err="failed to get container status \"b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5\": rpc error: code = NotFound desc = could not find container \"b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5\": container with ID starting with b87c7a106e8bda8c93253ba6e8cab71c66461bda494fae4c0a993285fbf112b5 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.610896 4884 scope.go:117] "RemoveContainer" containerID="e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.611269 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024\": container with ID starting with e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024 not found: ID does not exist" containerID="e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.611309 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024"} err="failed to get container status \"e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024\": rpc error: code = NotFound desc = could not find container \"e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024\": container with ID starting with e659a095f34585476c0335896dc8a5db03a480bee137c32e9a8bda1eb8108024 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.611334 4884 scope.go:117] "RemoveContainer" containerID="6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.611916 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f\": container with ID starting with 6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f not found: ID does not exist" containerID="6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.611948 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f"} err="failed to get container status \"6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f\": rpc error: code = NotFound desc = could not find container \"6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f\": container with ID starting with 6813c74685bc51ca5821e0bdf73381e84889a14e0e59efd5441d4a01ffd4b33f not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.611967 4884 scope.go:117] "RemoveContainer" containerID="abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.612302 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83\": container with ID starting with abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83 not found: ID does not exist" containerID="abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.612334 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83"} err="failed to get container status \"abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83\": rpc error: code = NotFound desc = could not find container \"abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83\": container with ID starting with abbdb319289be267fa04d7a0f2d35cff126124c54375efa6d4466b11680a2c83 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.612360 4884 scope.go:117] "RemoveContainer" containerID="70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.612779 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a\": container with ID starting with 70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a not found: ID does not exist" containerID="70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.612810 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a"} err="failed to get container status \"70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a\": rpc error: code = NotFound desc = could not find container \"70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a\": container with ID starting with 70b545432347bc86084ee8281bb795d09f0e278180f78f36f0f25a89b793d40a not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.612851 4884 scope.go:117] "RemoveContainer" containerID="644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.613157 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754\": container with ID starting with 644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754 not found: ID does not exist" containerID="644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.613187 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754"} err="failed to get container status \"644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754\": rpc error: code = NotFound desc = could not find container \"644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754\": container with ID starting with 644d82de31af947cde9160675a9d59cae687f5cb707dfc265cd4658e70855754 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.613204 4884 scope.go:117] "RemoveContainer" containerID="88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.613498 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d\": container with ID starting with 88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d not found: ID does not exist" containerID="88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.613518 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d"} err="failed to get container status \"88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d\": rpc error: code = NotFound desc = could not find container \"88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d\": container with ID starting with 88d00a2a675f61732a17b69d86402accdfa806c3969fe9dc811740427ab59e9d not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.613532 4884 scope.go:117] "RemoveContainer" containerID="6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.613980 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a\": container with ID starting with 6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a not found: ID does not exist" containerID="6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.614012 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a"} err="failed to get container status \"6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a\": rpc error: code = NotFound desc = could not find container \"6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a\": container with ID starting with 6408e17dcb73c7240ba0ca358cae93a142434cd1c0ccbee5620a35290db3666a not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.614031 4884 scope.go:117] "RemoveContainer" containerID="e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.614483 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0\": container with ID starting with e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0 not found: ID does not exist" containerID="e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.614525 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0"} err="failed to get container status \"e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0\": rpc error: code = NotFound desc = could not find container \"e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0\": container with ID starting with e8d869d8f29b7fafbee7732a5156e8efa8c7e7445056613c536a9502f8e416d0 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.614559 4884 scope.go:117] "RemoveContainer" containerID="0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.614980 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8\": container with ID starting with 0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8 not found: ID does not exist" containerID="0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.615014 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8"} err="failed to get container status \"0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8\": rpc error: code = NotFound desc = could not find container \"0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8\": container with ID starting with 0e1808e0a2dcf8ced0b179dd8a7c68e70ddabaee15a51c37bec8b7478fbec1d8 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.615032 4884 scope.go:117] "RemoveContainer" containerID="a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.615440 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09\": container with ID starting with a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09 not found: ID does not exist" containerID="a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.615486 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09"} err="failed to get container status \"a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09\": rpc error: code = NotFound desc = could not find container \"a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09\": container with ID starting with a57f027c4d4c949e20e3feac4f99c563eeae6adea0636590eb013f68de166b09 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.615516 4884 scope.go:117] "RemoveContainer" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.637913 4884 scope.go:117] "RemoveContainer" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.668752 4884 scope.go:117] "RemoveContainer" containerID="f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.695248 4884 scope.go:117] "RemoveContainer" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.695723 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f\": container with ID starting with 76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f not found: ID does not exist" containerID="76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.695774 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f"} err="failed to get container status \"76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f\": rpc error: code = NotFound desc = could not find container \"76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f\": container with ID starting with 76cdc3790604318a6e89c90d2cd09a42a363b0698b2f06d22f7b94e2f484dc1f not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.695817 4884 scope.go:117] "RemoveContainer" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.696232 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4\": container with ID starting with 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 not found: ID does not exist" containerID="88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.696270 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4"} err="failed to get container status \"88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4\": rpc error: code = NotFound desc = could not find container \"88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4\": container with ID starting with 88c68a7a3d0f992bfb67f05d38ad06bf86c10efcd9722b052b0a0907046da6e4 not found: ID does not exist"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.696298 4884 scope.go:117] "RemoveContainer" containerID="f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5"
Nov 28 15:45:45 crc kubenswrapper[4884]: E1128 15:45:45.696645 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5\": container with ID starting with f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5 not found: ID does not exist" containerID="f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5"
Nov 28 15:45:45 crc kubenswrapper[4884]: I1128 15:45:45.696669 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5"} err="failed to get container status \"f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5\": rpc error: code = NotFound desc = could not find container \"f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5\": container with ID starting with f237c394670c2ed2a427fbbb5e8c410185bd118d3c1526f67208279042fb76d5 not found: ID does not exist"
Nov 28 15:45:46 crc kubenswrapper[4884]: I1128 15:45:46.255490 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 28 15:45:46 crc kubenswrapper[4884]: I1128 15:45:46.313777 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 15:45:46 crc kubenswrapper[4884]: I1128 15:45:46.321374 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 15:45:46 crc kubenswrapper[4884]: I1128 15:45:46.702652 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" path="/var/lib/kubelet/pods/772cd3d6-daa4-4494-9ce8-6182e011fbc4/volumes"
Nov 28 15:45:46 crc kubenswrapper[4884]: I1128 15:45:46.706601 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" path="/var/lib/kubelet/pods/c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f/volumes"
Nov 28 15:45:46 crc kubenswrapper[4884]: I1128 15:45:46.707716 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" path="/var/lib/kubelet/pods/ce9413d4-1548-44f6-a50d-dcae9284f674/volumes"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.549956 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mxvwh"]
Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550553 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08c698b7-a3f8-4639-8237-a8e005ae2669" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550581 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="08c698b7-a3f8-4639-8237-a8e005ae2669" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550603 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="006d76b7-d405-4056-a55b-f01661cde456" containerName="init"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550615 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="006d76b7-d405-4056-a55b-f01661cde456" containerName="init"
Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550633 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-replicator"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550646 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-replicator"
Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550666 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="registry-server"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550678 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="registry-server"
Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550694 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-log"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550706 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-log"
Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550726 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-server"
Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 
15:45:47.550738 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-server" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550755 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38ca29b2-4263-4cb5-ba00-fe95430cf7f6" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550767 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="38ca29b2-4263-4cb5-ba00-fe95430cf7f6" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550790 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550802 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550820 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="rsync" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550831 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="rsync" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550843 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550855 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550875 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="extract-utilities" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550888 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="extract-utilities" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550911 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerName="galera" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550923 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerName="galera" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550935 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550947 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550962 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.550973 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.550995 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="ovn-northd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 
15:45:47.551008 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="ovn-northd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551026 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551039 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551057 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551068 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551081 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="ovsdbserver-sb" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551129 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="ovsdbserver-sb" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551149 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="swift-recon-cron" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551160 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="swift-recon-cron" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551180 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551191 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551205 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551216 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551231 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-notification-agent" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551243 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-notification-agent" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551259 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-updater" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551274 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-updater" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551297 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" 
containerName="object-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551309 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551333 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69cf456a-4993-4bd5-b745-5d73a65b6b91" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551345 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="69cf456a-4993-4bd5-b745-5d73a65b6b91" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551372 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551384 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-server" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551401 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551414 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551433 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90481f2a-55d7-459f-9e46-2ca816951a8d" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551445 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="90481f2a-55d7-459f-9e46-2ca816951a8d" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551468 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="cinder-scheduler" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551480 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="cinder-scheduler" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551501 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551512 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551529 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="585208a7-186b-40da-a7af-be303777e77c" containerName="kube-state-metrics" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551541 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="585208a7-186b-40da-a7af-be303777e77c" containerName="kube-state-metrics" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551559 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" containerName="nova-scheduler-scheduler" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551571 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" containerName="nova-scheduler-scheduler" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551592 4884 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-expirer" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551604 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-expirer" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551620 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="959ac7da-0d4b-48f3-84af-2650cd91c143" containerName="memcached" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551632 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="959ac7da-0d4b-48f3-84af-2650cd91c143" containerName="memcached" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551650 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551662 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551677 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551689 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551704 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="setup-container" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551717 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="setup-container" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551733 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e3f8ba-4fe2-433c-85d4-b30c488af3cd" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551745 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e3f8ba-4fe2-433c-85d4-b30c488af3cd" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551761 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551773 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551792 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="rabbitmq" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551803 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="rabbitmq" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551825 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="setup-container" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551837 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="setup-container" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551850 4884 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="ovsdbserver-nb" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551862 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="ovsdbserver-nb" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551875 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551886 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551903 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551914 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551928 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerName="galera" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551940 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerName="galera" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551952 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551963 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.551979 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e2007d-c536-47f5-9d03-92069c96f654" containerName="nova-cell1-conductor-conductor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.551993 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e2007d-c536-47f5-9d03-92069c96f654" containerName="nova-cell1-conductor-conductor" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552016 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552029 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552049 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552061 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-server" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552076 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552110 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-api" Nov 28 15:45:47 crc 
kubenswrapper[4884]: E1128 15:45:47.552129 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552142 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-api" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552165 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" containerName="nova-cell0-conductor-conductor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552177 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" containerName="nova-cell0-conductor-conductor" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552197 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-reaper" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552208 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-reaper" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552230 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerName="mysql-bootstrap" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552242 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerName="mysql-bootstrap" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552255 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552266 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552285 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="proxy-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552297 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="proxy-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552310 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server-init" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552322 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server-init" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552344 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552356 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552375 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91405fb1-1a28-4fb4-9548-84c4b1797d45" containerName="keystone-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552386 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="91405fb1-1a28-4fb4-9548-84c4b1797d45" 
containerName="keystone-api" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552405 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552417 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552430 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552442 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552458 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552473 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-server" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552485 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="extract-content" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552500 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="extract-content" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552529 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="sg-core" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552543 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="sg-core" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552563 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f633ca3e-03a6-4c24-9783-94fb61ed0ade" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552577 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f633ca3e-03a6-4c24-9783-94fb61ed0ade" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552595 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerName="mysql-bootstrap" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552612 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerName="mysql-bootstrap" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552634 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="006d76b7-d405-4056-a55b-f01661cde456" containerName="dnsmasq-dns" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552650 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="006d76b7-d405-4056-a55b-f01661cde456" containerName="dnsmasq-dns" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552675 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bc5134b-7bda-47e0-86a3-b4f374e842e6" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552691 4884 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7bc5134b-7bda-47e0-86a3-b4f374e842e6" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552715 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552731 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552753 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552768 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552788 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-central-agent" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552800 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-central-agent" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552815 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552828 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552847 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552860 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-log" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552876 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-metadata" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552888 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-metadata" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552907 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="rabbitmq" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552918 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="rabbitmq" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552933 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-updater" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.552947 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-updater" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552963 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 
15:45:47.552974 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-api" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.552995 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553008 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: E1128 15:45:47.553028 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="probe" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553039 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="probe" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553312 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="ovn-northd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553332 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553347 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553364 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" containerName="probe" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553381 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553399 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553424 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c2dcae8-7c76-46e9-90d4-afd8af5f474a" containerName="galera" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553441 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdd604f8-af2a-40bb-b85a-14d7a4eeb000" containerName="nova-cell0-conductor-conductor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553458 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="006d76b7-d405-4056-a55b-f01661cde456" containerName="dnsmasq-dns" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553476 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553498 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553520 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-metadata" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553537 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="769485f5-63dc-4d17-9bfb-3006d99e2616" 
containerName="cinder-scheduler" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553556 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553572 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="ovsdbserver-sb" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553588 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f314d326-c20e-41cb-8fb5-a608d002b170" containerName="ovn-controller" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553608 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553621 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553642 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="swift-recon-cron" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553656 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-notification-agent" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553705 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="proxy-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553728 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="69cf456a-4993-4bd5-b745-5d73a65b6b91" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553741 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b1d00ac-0efe-45af-9366-f5d302b86ccb" containerName="galera" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553762 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-expirer" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553773 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553790 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-replicator" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553810 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-reaper" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553824 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="sg-core" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553838 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553852 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0264205a-7b80-4df8-8d57-3923074f4a59" containerName="nova-scheduler-scheduler" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 
15:45:47.553874 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553890 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="454fa1ac-19ca-4c44-b0fb-2c30039524a7" containerName="rabbitmq" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553906 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553918 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-updater" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553933 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e3f8ba-4fe2-433c-85d4-b30c488af3cd" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553948 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0e2713c-5c3d-457f-b444-122039f003d3" containerName="glance-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553964 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.553984 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554008 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c5a29a-fbc0-4c91-a3a3-e4d96c911c98" containerName="ceilometer-central-agent" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554022 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dba6a6f-821c-4897-b88d-5cca9482f4fa" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554037 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebf7deee-37f0-4b47-8e10-3027e7009916" containerName="registry-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554051 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="49c9a390-4563-4e1e-a109-ff673e664409" containerName="barbican-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554073 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="90481f2a-55d7-459f-9e46-2ca816951a8d" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554116 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cdac902-b0e5-4f41-923c-07241207d730" containerName="openstack-network-exporter" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554135 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554158 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bc5134b-7bda-47e0-86a3-b4f374e842e6" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554172 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="object-auditor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554190 4884 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="458e9b5e-8446-4bfa-ba33-12a3a32c74ea" containerName="neutron-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554207 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c33f5fde-1bf4-406f-ad32-a1ed3ee3db9f" containerName="ovsdbserver-nb" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554228 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554243 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d1f5739-e8a4-4081-8104-57dfc250861a" containerName="glance-httpd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554262 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb0b621b-d100-4ec9-b815-13e67489a2ac" containerName="nova-api-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554278 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="91405fb1-1a28-4fb4-9548-84c4b1797d45" containerName="keystone-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554297 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="39e34f52-669e-4086-94ef-a38542dbc6ea" containerName="nova-metadata-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554319 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6237eb73-294e-4e4b-a619-e669061a1b5b" containerName="placement-log" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554336 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="585208a7-186b-40da-a7af-be303777e77c" containerName="kube-state-metrics" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554348 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="38ca29b2-4263-4cb5-ba00-fe95430cf7f6" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554370 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="rsync" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554382 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f788ec00-6116-4f7f-ac08-21623599090d" containerName="proxy-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554402 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="container-updater" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554419 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovs-vswitchd" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554432 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a5d81bd-3b99-4aa6-82dc-2969295dce39" containerName="rabbitmq" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554446 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c94e18c-15c4-4ef6-929f-c1941dbd3919" containerName="cinder-api" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554460 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="959ac7da-0d4b-48f3-84af-2650cd91c143" containerName="memcached" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554475 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="afabf28f-eb82-4439-aa4f-3154e1007bf5" containerName="barbican-worker" Nov 28 15:45:47 crc 
kubenswrapper[4884]: I1128 15:45:47.554491 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e2007d-c536-47f5-9d03-92069c96f654" containerName="nova-cell1-conductor-conductor" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554512 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce9413d4-1548-44f6-a50d-dcae9284f674" containerName="ovsdb-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554526 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c29b378e-a008-4903-9ebd-2570d37d8a11" containerName="barbican-keystone-listener" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554544 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="772cd3d6-daa4-4494-9ce8-6182e011fbc4" containerName="account-server" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554561 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="08c698b7-a3f8-4639-8237-a8e005ae2669" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.554575 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f633ca3e-03a6-4c24-9783-94fb61ed0ade" containerName="mariadb-account-delete" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.556508 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.557214 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxvwh"] Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.639924 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-catalog-content\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.640024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-utilities\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.640058 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k5kf\" (UniqueName: \"kubernetes.io/projected/daf6ab24-498f-42b4-9a7f-72afedd98043-kube-api-access-2k5kf\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.742141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-utilities\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.742201 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k5kf\" (UniqueName: \"kubernetes.io/projected/daf6ab24-498f-42b4-9a7f-72afedd98043-kube-api-access-2k5kf\") pod \"redhat-marketplace-mxvwh\" (UID: 
\"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.742242 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-utilities\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.742277 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-catalog-content\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.742649 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-catalog-content\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.767437 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k5kf\" (UniqueName: \"kubernetes.io/projected/daf6ab24-498f-42b4-9a7f-72afedd98043-kube-api-access-2k5kf\") pod \"redhat-marketplace-mxvwh\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:47 crc kubenswrapper[4884]: I1128 15:45:47.890375 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:48 crc kubenswrapper[4884]: I1128 15:45:48.175019 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxvwh"] Nov 28 15:45:48 crc kubenswrapper[4884]: I1128 15:45:48.292246 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxvwh" event={"ID":"daf6ab24-498f-42b4-9a7f-72afedd98043","Type":"ContainerStarted","Data":"2433d414981fedcef1d242bd5dc290cf644b69b925e12dfbb32b8cf5e18c91db"} Nov 28 15:45:49 crc kubenswrapper[4884]: I1128 15:45:49.305245 4884 generic.go:334] "Generic (PLEG): container finished" podID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerID="d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74" exitCode=0 Nov 28 15:45:49 crc kubenswrapper[4884]: I1128 15:45:49.305286 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxvwh" event={"ID":"daf6ab24-498f-42b4-9a7f-72afedd98043","Type":"ContainerDied","Data":"d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74"} Nov 28 15:45:49 crc kubenswrapper[4884]: I1128 15:45:49.480632 4884 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podf788ec00-6116-4f7f-ac08-21623599090d"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podf788ec00-6116-4f7f-ac08-21623599090d] : Timed out while waiting for systemd to remove kubepods-besteffort-podf788ec00_6116_4f7f_ac08_21623599090d.slice" Nov 28 15:45:49 crc kubenswrapper[4884]: E1128 15:45:49.480890 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort 
podf788ec00-6116-4f7f-ac08-21623599090d] : unable to destroy cgroup paths for cgroup [kubepods besteffort podf788ec00-6116-4f7f-ac08-21623599090d] : Timed out while waiting for systemd to remove kubepods-besteffort-podf788ec00_6116_4f7f_ac08_21623599090d.slice" pod="openstack/swift-proxy-988fdb959-xkp66" podUID="f788ec00-6116-4f7f-ac08-21623599090d" Nov 28 15:45:50 crc kubenswrapper[4884]: I1128 15:45:50.316418 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-988fdb959-xkp66" Nov 28 15:45:50 crc kubenswrapper[4884]: I1128 15:45:50.372192 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-988fdb959-xkp66"] Nov 28 15:45:50 crc kubenswrapper[4884]: I1128 15:45:50.382653 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-988fdb959-xkp66"] Nov 28 15:45:50 crc kubenswrapper[4884]: I1128 15:45:50.699212 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f788ec00-6116-4f7f-ac08-21623599090d" path="/var/lib/kubelet/pods/f788ec00-6116-4f7f-ac08-21623599090d/volumes" Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.243175 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.243384 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.243570 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.245477 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.245736 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" gracePeriod=600 Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.340509 4884 generic.go:334] "Generic (PLEG): container finished" podID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerID="08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6" exitCode=0 Nov 28 15:45:51 crc kubenswrapper[4884]: I1128 15:45:51.340580 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxvwh" event={"ID":"daf6ab24-498f-42b4-9a7f-72afedd98043","Type":"ContainerDied","Data":"08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6"} Nov 28 15:45:51 crc kubenswrapper[4884]: E1128 15:45:51.377751 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:45:52 crc kubenswrapper[4884]: I1128 15:45:52.357650 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxvwh" event={"ID":"daf6ab24-498f-42b4-9a7f-72afedd98043","Type":"ContainerStarted","Data":"127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf"} Nov 28 15:45:52 crc kubenswrapper[4884]: I1128 15:45:52.362042 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" exitCode=0 Nov 28 15:45:52 crc kubenswrapper[4884]: I1128 15:45:52.362152 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d"} Nov 28 15:45:52 crc kubenswrapper[4884]: I1128 15:45:52.362220 4884 scope.go:117] "RemoveContainer" containerID="93d6c6fb2978a8eb8e8488bd25799dd7994dbdfad1cd1ea19ac8f2b6e6f9e8d0" Nov 28 15:45:52 crc kubenswrapper[4884]: I1128 15:45:52.362891 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:45:52 crc kubenswrapper[4884]: E1128 15:45:52.363468 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:45:52 crc kubenswrapper[4884]: I1128 15:45:52.386632 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mxvwh" podStartSLOduration=2.746803796 podStartE2EDuration="5.38660781s" podCreationTimestamp="2025-11-28 15:45:47 +0000 UTC" firstStartedPulling="2025-11-28 15:45:49.309220022 +0000 UTC m=+1588.872003823" lastFinishedPulling="2025-11-28 15:45:51.949023996 +0000 UTC m=+1591.511807837" observedRunningTime="2025-11-28 15:45:52.383001282 +0000 UTC m=+1591.945785103" watchObservedRunningTime="2025-11-28 15:45:52.38660781 +0000 UTC m=+1591.949391611" Nov 28 15:45:57 crc kubenswrapper[4884]: I1128 15:45:57.890843 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:57 crc kubenswrapper[4884]: I1128 15:45:57.891415 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:57 crc kubenswrapper[4884]: I1128 15:45:57.968568 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:58 crc kubenswrapper[4884]: I1128 15:45:58.514885 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:45:58 crc kubenswrapper[4884]: I1128 15:45:58.590511 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxvwh"] Nov 28 15:46:00 crc kubenswrapper[4884]: I1128 15:46:00.463689 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mxvwh" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="registry-server" containerID="cri-o://127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf" gracePeriod=2 Nov 28 15:46:00 crc kubenswrapper[4884]: I1128 15:46:00.941886 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.048338 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-catalog-content\") pod \"daf6ab24-498f-42b4-9a7f-72afedd98043\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.048457 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k5kf\" (UniqueName: \"kubernetes.io/projected/daf6ab24-498f-42b4-9a7f-72afedd98043-kube-api-access-2k5kf\") pod \"daf6ab24-498f-42b4-9a7f-72afedd98043\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.048508 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-utilities\") pod \"daf6ab24-498f-42b4-9a7f-72afedd98043\" (UID: \"daf6ab24-498f-42b4-9a7f-72afedd98043\") " Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.049706 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-utilities" (OuterVolumeSpecName: "utilities") pod "daf6ab24-498f-42b4-9a7f-72afedd98043" (UID: "daf6ab24-498f-42b4-9a7f-72afedd98043"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.068657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daf6ab24-498f-42b4-9a7f-72afedd98043-kube-api-access-2k5kf" (OuterVolumeSpecName: "kube-api-access-2k5kf") pod "daf6ab24-498f-42b4-9a7f-72afedd98043" (UID: "daf6ab24-498f-42b4-9a7f-72afedd98043"). InnerVolumeSpecName "kube-api-access-2k5kf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.150404 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k5kf\" (UniqueName: \"kubernetes.io/projected/daf6ab24-498f-42b4-9a7f-72afedd98043-kube-api-access-2k5kf\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.150448 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.264291 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "daf6ab24-498f-42b4-9a7f-72afedd98043" (UID: "daf6ab24-498f-42b4-9a7f-72afedd98043"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.353499 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daf6ab24-498f-42b4-9a7f-72afedd98043-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.477194 4884 generic.go:334] "Generic (PLEG): container finished" podID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerID="127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf" exitCode=0 Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.477233 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxvwh" event={"ID":"daf6ab24-498f-42b4-9a7f-72afedd98043","Type":"ContainerDied","Data":"127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf"} Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.477261 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mxvwh" event={"ID":"daf6ab24-498f-42b4-9a7f-72afedd98043","Type":"ContainerDied","Data":"2433d414981fedcef1d242bd5dc290cf644b69b925e12dfbb32b8cf5e18c91db"} Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.477278 4884 scope.go:117] "RemoveContainer" containerID="127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.477353 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mxvwh" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.512018 4884 scope.go:117] "RemoveContainer" containerID="08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.517532 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxvwh"] Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.524219 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mxvwh"] Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.545453 4884 scope.go:117] "RemoveContainer" containerID="d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.593929 4884 scope.go:117] "RemoveContainer" containerID="127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf" Nov 28 15:46:01 crc kubenswrapper[4884]: E1128 15:46:01.594673 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf\": container with ID starting with 127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf not found: ID does not exist" containerID="127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.594763 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf"} err="failed to get container status \"127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf\": rpc error: code = NotFound desc = could not find container \"127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf\": container with ID starting with 127475a1db652c7552f10faedcf896c1bab5007d83e22e093101bcd2d26889cf not found: ID does not exist" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.594801 4884 scope.go:117] "RemoveContainer" containerID="08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6" Nov 28 15:46:01 crc kubenswrapper[4884]: E1128 15:46:01.595177 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6\": container with ID starting with 08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6 not found: ID does not exist" containerID="08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.595207 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6"} err="failed to get container status \"08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6\": rpc error: code = NotFound desc = could not find container \"08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6\": container with ID starting with 08eca75f2766805077349d50fc6d1563f9081a6928a21fb5cd4c6511c24bb1d6 not found: ID does not exist" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.595224 4884 scope.go:117] "RemoveContainer" containerID="d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74" Nov 28 15:46:01 crc kubenswrapper[4884]: E1128 15:46:01.595518 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74\": container with ID starting with d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74 not found: ID does not exist" containerID="d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74" Nov 28 15:46:01 crc kubenswrapper[4884]: I1128 15:46:01.595547 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74"} err="failed to get container status \"d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74\": rpc error: code = NotFound desc = could not find container \"d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74\": container with ID starting with d164f6a86671d58fa194af02d608d56ada6a28db66078c371040b75c3ef63b74 not found: ID does not exist" Nov 28 15:46:02 crc kubenswrapper[4884]: I1128 15:46:02.699821 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" path="/var/lib/kubelet/pods/daf6ab24-498f-42b4-9a7f-72afedd98043/volumes" Nov 28 15:46:07 crc kubenswrapper[4884]: I1128 15:46:07.687872 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:46:07 crc kubenswrapper[4884]: E1128 15:46:07.688713 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:46:20 crc kubenswrapper[4884]: I1128 15:46:20.695244 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:46:20 crc kubenswrapper[4884]: E1128 15:46:20.696890 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:46:25 crc kubenswrapper[4884]: I1128 15:46:25.251217 4884 scope.go:117] "RemoveContainer" containerID="1d304903df4db4958b0ffe4823917fe92702684454004f9d45c4f96d73b84b9a" Nov 28 15:46:25 crc kubenswrapper[4884]: I1128 15:46:25.279965 4884 scope.go:117] "RemoveContainer" containerID="9dd9057f62ee5c9c6275f559b3176fdfa300b26d1bbfeff07c5f5c7e8c62c3c1" Nov 28 15:46:25 crc kubenswrapper[4884]: I1128 15:46:25.338361 4884 scope.go:117] "RemoveContainer" containerID="c281696f1781e0ccfc031988e9712f4a3ced548d63c03915d8d3afac3ba9f01f" Nov 28 15:46:25 crc kubenswrapper[4884]: I1128 15:46:25.392241 4884 scope.go:117] "RemoveContainer" containerID="2f8389f9ec7174a8e9d82dc2ac13c93505a07fdd6567c79634dda479b20e57b0" Nov 28 15:46:34 crc kubenswrapper[4884]: I1128 15:46:34.688806 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:46:34 crc kubenswrapper[4884]: E1128 15:46:34.689619 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:46:47 crc kubenswrapper[4884]: I1128 15:46:47.690245 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:46:47 crc kubenswrapper[4884]: E1128 15:46:47.691203 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:47:00 crc kubenswrapper[4884]: I1128 15:47:00.694884 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:47:00 crc kubenswrapper[4884]: E1128 15:47:00.695613 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:47:13 crc kubenswrapper[4884]: I1128 15:47:13.688660 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:47:13 crc kubenswrapper[4884]: E1128 15:47:13.689555 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:47:24 crc kubenswrapper[4884]: I1128 15:47:24.689742 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:47:24 crc kubenswrapper[4884]: E1128 15:47:24.690771 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.581377 4884 scope.go:117] "RemoveContainer" containerID="2ac986d672399f8208eab374707fb3a20c8d05e42870714340327e102a6f8c29" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.638219 4884 scope.go:117] "RemoveContainer" containerID="0ead29ea69f9cb39624ccf2cdc69491b89fbdb757e4248352d5579bf98013460" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.685474 4884 scope.go:117] "RemoveContainer" 
containerID="6af27ca64073db0df88f0ae368c4d12f0fe58f42eb05597de6f96faf030eabdc" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.715728 4884 scope.go:117] "RemoveContainer" containerID="e502b73487d2bb57b1e75ef0a8faf8c996a390aa1352bf957bb16adf8c8bc21b" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.744082 4884 scope.go:117] "RemoveContainer" containerID="a90ee2ce33b2e09b2454ee0a10a11d83c9be70df7298ec7541b4a79227104da6" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.773495 4884 scope.go:117] "RemoveContainer" containerID="fdbae1a079e558ce0feb21ed3c6269335bffc9b787b3cc81facbc980945f77ff" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.790330 4884 scope.go:117] "RemoveContainer" containerID="c31ec74fdd2fed531250dfc9d5fc9e3011db90e8ed2175936a67f679bcb55eb2" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.808289 4884 scope.go:117] "RemoveContainer" containerID="bc5b536cad821d8563f531b0ae72f4474004545925faa5ffbb706c06cc0347bc" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.844409 4884 scope.go:117] "RemoveContainer" containerID="72697e6b2a753930910f59bee8df04dae210b43500f84698945298c337f4adf6" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.864045 4884 scope.go:117] "RemoveContainer" containerID="e60d16879a4e31788c2d4d904429d05b12572a36e15203f8e2455f81a2057497" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.879718 4884 scope.go:117] "RemoveContainer" containerID="2d1411fe8264af95e0eb8318d16f2b0fdd715b64f44342f8230637454ca0ab1c" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.914488 4884 scope.go:117] "RemoveContainer" containerID="4e35f8f692380c8cb19ad1be2fbac1bc3ab5001506118e1d8fa4d56ebc61177a" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.936585 4884 scope.go:117] "RemoveContainer" containerID="28ef434d21a23e4515e4b14857316f3404e4fe7b7948ccd1d761ac4aa596a8b7" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.958989 4884 scope.go:117] "RemoveContainer" containerID="d6114c43021b4cdc8ee40a171d9cdb567ac02848a9ade6dd09f970c6a21fc709" Nov 28 15:47:25 crc kubenswrapper[4884]: I1128 15:47:25.990248 4884 scope.go:117] "RemoveContainer" containerID="c35862251dd7fa84424b1efdd78c3b8680af8eed8b5adf29edc19bd3bd2ec964" Nov 28 15:47:26 crc kubenswrapper[4884]: I1128 15:47:26.015866 4884 scope.go:117] "RemoveContainer" containerID="b672e9d20e9ae21776b687169a5546d95f3c175cd1a50b1e07fe0423b506e7fa" Nov 28 15:47:39 crc kubenswrapper[4884]: I1128 15:47:39.688473 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:47:39 crc kubenswrapper[4884]: E1128 15:47:39.689758 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:47:51 crc kubenswrapper[4884]: I1128 15:47:51.688164 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:47:51 crc kubenswrapper[4884]: E1128 15:47:51.689833 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:48:03 crc kubenswrapper[4884]: I1128 15:48:03.688905 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:48:03 crc kubenswrapper[4884]: E1128 15:48:03.690185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:48:18 crc kubenswrapper[4884]: I1128 15:48:18.689005 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:48:18 crc kubenswrapper[4884]: E1128 15:48:18.690120 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.248382 4884 scope.go:117] "RemoveContainer" containerID="96012b560b8646ae95f8d578e115efcf936c4cf2e7d77a3d610028cab2e4960e" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.314892 4884 scope.go:117] "RemoveContainer" containerID="6c624f23934a2757b9d0fda39e8ae22b7e58a88676450e0d3ad4c24f2ee510f7" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.346479 4884 scope.go:117] "RemoveContainer" containerID="1ed4da5ca2784ad18d7ac179c6bd97cca4c984efccfcce500f84ecafa684d052" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.375221 4884 scope.go:117] "RemoveContainer" containerID="edd37f72384b03eb5519315a8e0249d6ae9ad1a1807125baf938035d6cdfba74" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.420274 4884 scope.go:117] "RemoveContainer" containerID="3f5da2b67edbeba186939a6c7b896f0e81ce009bf1392ad45ba9fd37e154916b" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.452259 4884 scope.go:117] "RemoveContainer" containerID="a13fa47dc18a9e97c12845d9bf327b2a99237952371e8e2be117fbd3fd157b73" Nov 28 15:48:26 crc kubenswrapper[4884]: I1128 15:48:26.471894 4884 scope.go:117] "RemoveContainer" containerID="cac4ad84ecf6419d28c8c00201ea020c4f4d4f1a1a5c60443d2e5da27ea51d0f" Nov 28 15:48:31 crc kubenswrapper[4884]: I1128 15:48:31.688570 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:48:31 crc kubenswrapper[4884]: E1128 15:48:31.689561 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:48:44 crc kubenswrapper[4884]: I1128 15:48:44.688490 
4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:48:44 crc kubenswrapper[4884]: E1128 15:48:44.689285 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:48:56 crc kubenswrapper[4884]: I1128 15:48:56.688871 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:48:56 crc kubenswrapper[4884]: E1128 15:48:56.689717 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:49:10 crc kubenswrapper[4884]: I1128 15:49:10.692024 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:49:10 crc kubenswrapper[4884]: E1128 15:49:10.692722 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:49:21 crc kubenswrapper[4884]: I1128 15:49:21.688758 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:49:21 crc kubenswrapper[4884]: E1128 15:49:21.689795 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.436434 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tljsl"] Nov 28 15:49:22 crc kubenswrapper[4884]: E1128 15:49:22.436864 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="extract-utilities" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.436882 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="extract-utilities" Nov 28 15:49:22 crc kubenswrapper[4884]: E1128 15:49:22.436910 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="extract-content" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.436919 4884 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="extract-content" Nov 28 15:49:22 crc kubenswrapper[4884]: E1128 15:49:22.436939 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="registry-server" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.436948 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="registry-server" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.437156 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="daf6ab24-498f-42b4-9a7f-72afedd98043" containerName="registry-server" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.439318 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.442388 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tljsl"] Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.586613 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-catalog-content\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.586941 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26dv5\" (UniqueName: \"kubernetes.io/projected/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-kube-api-access-26dv5\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.586993 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-utilities\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.688866 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-catalog-content\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.688971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26dv5\" (UniqueName: \"kubernetes.io/projected/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-kube-api-access-26dv5\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.689029 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-utilities\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.690538 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-utilities\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.692145 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-catalog-content\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.716864 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26dv5\" (UniqueName: \"kubernetes.io/projected/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-kube-api-access-26dv5\") pod \"certified-operators-tljsl\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:22 crc kubenswrapper[4884]: I1128 15:49:22.930559 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:23 crc kubenswrapper[4884]: I1128 15:49:23.413302 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tljsl"] Nov 28 15:49:23 crc kubenswrapper[4884]: I1128 15:49:23.507304 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tljsl" event={"ID":"46578fa5-689e-4dd7-a7aa-a0a7bee103ad","Type":"ContainerStarted","Data":"5587918f78fbcd7ff79f1a3488985ec057ff0a6ed5e1543383399b3a35a535dc"} Nov 28 15:49:24 crc kubenswrapper[4884]: I1128 15:49:24.517832 4884 generic.go:334] "Generic (PLEG): container finished" podID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerID="0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b" exitCode=0 Nov 28 15:49:24 crc kubenswrapper[4884]: I1128 15:49:24.517984 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tljsl" event={"ID":"46578fa5-689e-4dd7-a7aa-a0a7bee103ad","Type":"ContainerDied","Data":"0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b"} Nov 28 15:49:24 crc kubenswrapper[4884]: I1128 15:49:24.522288 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.548825 4884 generic.go:334] "Generic (PLEG): container finished" podID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerID="45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051" exitCode=0 Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.549078 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tljsl" event={"ID":"46578fa5-689e-4dd7-a7aa-a0a7bee103ad","Type":"ContainerDied","Data":"45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051"} Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.562822 4884 scope.go:117] "RemoveContainer" containerID="e629d92bbf1f5998319592b655efddbcc1184d06484b8cc9eedf20db4d29406b" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.609771 4884 scope.go:117] "RemoveContainer" containerID="d6c08e0e8c4eb6f4610c81be9b7d6484ee5b9c70ede940abe57165a86cb85e5e" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.637634 4884 scope.go:117] 
"RemoveContainer" containerID="364c339d6ef9ee657fe476484782c8687c2924daa4dd5ab62181131d3568777e" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.670814 4884 scope.go:117] "RemoveContainer" containerID="1dba7e621dc762e39efb79478b3479dc3c3f7b54537bead34f7ee2f18daf1ebc" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.700041 4884 scope.go:117] "RemoveContainer" containerID="a88799f33ef691b4ee8915163ec9d18f0972fe6e1536003ad001881fb31dca5f" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.728675 4884 scope.go:117] "RemoveContainer" containerID="078b8f4a791acff651b4144da1d83ca6c40df0796a420224b04ec8f504b1c85b" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.787262 4884 scope.go:117] "RemoveContainer" containerID="db66a63c6b73949a4a381fd8c6a6996fc1d34737761429ce5548fc962cd511cd" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.813123 4884 scope.go:117] "RemoveContainer" containerID="7a2d5c88d65fad02b9525e7a41b69b86653082bc1c05fe974f5231b1c252b22a" Nov 28 15:49:26 crc kubenswrapper[4884]: I1128 15:49:26.842759 4884 scope.go:117] "RemoveContainer" containerID="deec2653ca05ea2b2431e7f8f5e25dfcc18dcdf5ee831af2554036dbfce9676e" Nov 28 15:49:27 crc kubenswrapper[4884]: I1128 15:49:27.560434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tljsl" event={"ID":"46578fa5-689e-4dd7-a7aa-a0a7bee103ad","Type":"ContainerStarted","Data":"5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd"} Nov 28 15:49:27 crc kubenswrapper[4884]: I1128 15:49:27.582897 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tljsl" podStartSLOduration=3.01734661 podStartE2EDuration="5.582878083s" podCreationTimestamp="2025-11-28 15:49:22 +0000 UTC" firstStartedPulling="2025-11-28 15:49:24.522011718 +0000 UTC m=+1804.084795519" lastFinishedPulling="2025-11-28 15:49:27.087543181 +0000 UTC m=+1806.650326992" observedRunningTime="2025-11-28 15:49:27.576660083 +0000 UTC m=+1807.139443894" watchObservedRunningTime="2025-11-28 15:49:27.582878083 +0000 UTC m=+1807.145661884" Nov 28 15:49:32 crc kubenswrapper[4884]: I1128 15:49:32.932016 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:32 crc kubenswrapper[4884]: I1128 15:49:32.932466 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:33 crc kubenswrapper[4884]: I1128 15:49:33.009557 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:33 crc kubenswrapper[4884]: I1128 15:49:33.669656 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:33 crc kubenswrapper[4884]: I1128 15:49:33.740979 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tljsl"] Nov 28 15:49:34 crc kubenswrapper[4884]: I1128 15:49:34.688612 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:49:34 crc kubenswrapper[4884]: E1128 15:49:34.689757 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:49:35 crc kubenswrapper[4884]: I1128 15:49:35.649188 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tljsl" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="registry-server" containerID="cri-o://5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd" gracePeriod=2 Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.095909 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.284662 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-catalog-content\") pod \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.284762 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-utilities\") pod \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.284854 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26dv5\" (UniqueName: \"kubernetes.io/projected/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-kube-api-access-26dv5\") pod \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\" (UID: \"46578fa5-689e-4dd7-a7aa-a0a7bee103ad\") " Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.286327 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-utilities" (OuterVolumeSpecName: "utilities") pod "46578fa5-689e-4dd7-a7aa-a0a7bee103ad" (UID: "46578fa5-689e-4dd7-a7aa-a0a7bee103ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.294351 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-kube-api-access-26dv5" (OuterVolumeSpecName: "kube-api-access-26dv5") pod "46578fa5-689e-4dd7-a7aa-a0a7bee103ad" (UID: "46578fa5-689e-4dd7-a7aa-a0a7bee103ad"). InnerVolumeSpecName "kube-api-access-26dv5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.385955 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.386000 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26dv5\" (UniqueName: \"kubernetes.io/projected/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-kube-api-access-26dv5\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.415658 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "46578fa5-689e-4dd7-a7aa-a0a7bee103ad" (UID: "46578fa5-689e-4dd7-a7aa-a0a7bee103ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.487033 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46578fa5-689e-4dd7-a7aa-a0a7bee103ad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.662890 4884 generic.go:334] "Generic (PLEG): container finished" podID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerID="5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd" exitCode=0 Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.662961 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tljsl" event={"ID":"46578fa5-689e-4dd7-a7aa-a0a7bee103ad","Type":"ContainerDied","Data":"5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd"} Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.663006 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tljsl" event={"ID":"46578fa5-689e-4dd7-a7aa-a0a7bee103ad","Type":"ContainerDied","Data":"5587918f78fbcd7ff79f1a3488985ec057ff0a6ed5e1543383399b3a35a535dc"} Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.663008 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tljsl" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.663035 4884 scope.go:117] "RemoveContainer" containerID="5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.691072 4884 scope.go:117] "RemoveContainer" containerID="45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.716786 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tljsl"] Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.722263 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tljsl"] Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.726053 4884 scope.go:117] "RemoveContainer" containerID="0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.766967 4884 scope.go:117] "RemoveContainer" containerID="5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd" Nov 28 15:49:36 crc kubenswrapper[4884]: E1128 15:49:36.767497 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd\": container with ID starting with 5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd not found: ID does not exist" containerID="5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.767534 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd"} err="failed to get container status \"5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd\": rpc error: code = NotFound desc = could not find container \"5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd\": container with ID starting with 5080ace0fbf4dff1750e9c8b73871b149ab6747527150b1ce7b822608309adbd not found: ID does not exist" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.767554 4884 scope.go:117] "RemoveContainer" containerID="45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051" Nov 28 15:49:36 crc kubenswrapper[4884]: E1128 15:49:36.767915 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051\": container with ID starting with 45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051 not found: ID does not exist" containerID="45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.767945 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051"} err="failed to get container status \"45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051\": rpc error: code = NotFound desc = could not find container \"45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051\": container with ID starting with 45b591d46b28bb3b7afc77853a4eca6878ef61bb5a9dd829eaf2ea9f7d566051 not found: ID does not exist" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.767962 4884 scope.go:117] "RemoveContainer" 
containerID="0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b" Nov 28 15:49:36 crc kubenswrapper[4884]: E1128 15:49:36.768249 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b\": container with ID starting with 0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b not found: ID does not exist" containerID="0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b" Nov 28 15:49:36 crc kubenswrapper[4884]: I1128 15:49:36.768273 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b"} err="failed to get container status \"0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b\": rpc error: code = NotFound desc = could not find container \"0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b\": container with ID starting with 0c164535a3f14f33dae21552070b12afdf479f0d74817134675d44f0013a593b not found: ID does not exist" Nov 28 15:49:38 crc kubenswrapper[4884]: I1128 15:49:38.698312 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" path="/var/lib/kubelet/pods/46578fa5-689e-4dd7-a7aa-a0a7bee103ad/volumes" Nov 28 15:49:49 crc kubenswrapper[4884]: I1128 15:49:49.688237 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:49:49 crc kubenswrapper[4884]: E1128 15:49:49.689319 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:50:01 crc kubenswrapper[4884]: I1128 15:50:01.688805 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:50:01 crc kubenswrapper[4884]: E1128 15:50:01.689822 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:50:12 crc kubenswrapper[4884]: I1128 15:50:12.688826 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:50:12 crc kubenswrapper[4884]: E1128 15:50:12.690189 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.002972 4884 scope.go:117] "RemoveContainer" 
containerID="763ed99f997eeeb0d08695603ce8409930b1a454f8319f482b75714ea0827268" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.044904 4884 scope.go:117] "RemoveContainer" containerID="c60a6f26a660bec0bac432aab6278c3112868da8289bd4e2d9af5e68fee5fb95" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.106289 4884 scope.go:117] "RemoveContainer" containerID="7c3ae662b3a86550433fd2c1c5a7ca2abc72a8f259452abb679aab5d0f1f8da2" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.167133 4884 scope.go:117] "RemoveContainer" containerID="d3080a89a34206a9653107776ef014eae527615ab0f6f525bebc4116890adea8" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.213894 4884 scope.go:117] "RemoveContainer" containerID="32ba7e16f2dea6c4249c539765a6254e245e09abcbbe10c696d6b4f63e80584d" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.236737 4884 scope.go:117] "RemoveContainer" containerID="423fc717ca99c44555115e638f9599ae77f0b956b2f33212fa986e4d737e108b" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.256580 4884 scope.go:117] "RemoveContainer" containerID="831214ed000ec9e6d3193367daa0b92b3bda0dd88e008ff7d74c3f2291dbc3a8" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.275737 4884 scope.go:117] "RemoveContainer" containerID="b42cabfe2b0f40870c93167cc022af22181c4c8400a649d0268e8625b2010d96" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.302975 4884 scope.go:117] "RemoveContainer" containerID="356c6a181f8baa4e4f7bc613da8e659e49bfc056bb3d7d30f338ceda3391c030" Nov 28 15:50:27 crc kubenswrapper[4884]: I1128 15:50:27.689287 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:50:27 crc kubenswrapper[4884]: E1128 15:50:27.689587 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:50:42 crc kubenswrapper[4884]: I1128 15:50:42.688707 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:50:42 crc kubenswrapper[4884]: E1128 15:50:42.689804 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:50:57 crc kubenswrapper[4884]: I1128 15:50:57.688292 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:50:58 crc kubenswrapper[4884]: I1128 15:50:58.475987 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"eeae40e64fd19266154157110c0d8142895dcd3bef27cbebfe47b0a46e4d2908"} Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.497800 4884 scope.go:117] "RemoveContainer" containerID="8807da65f797b04894447dbc2417ad0e7fe1821650be3b9a2879db50840e4859" Nov 
28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.529630 4884 scope.go:117] "RemoveContainer" containerID="44cd99c65316901d33074d95c92f5d515715e45a10031091ae4584eaeeb68f07" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.563236 4884 scope.go:117] "RemoveContainer" containerID="0dc318d6697c8c4b07f99c224c62ddcb0dac46ebed54243f1880cec3c2d5fd1c" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.604561 4884 scope.go:117] "RemoveContainer" containerID="9cd2eb76fc50e8e3faf8777ccbf612add4a055c39bdf3a15fe9e7894fdd3cb9e" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.627005 4884 scope.go:117] "RemoveContainer" containerID="cc77c44684f7d1ced03406af74ae901e41656eca80e8b630852276ad4fe96d9d" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.651014 4884 scope.go:117] "RemoveContainer" containerID="5c0b89ee52ef4a03977cf2458b6c9a090c9b06c0672150855253eecc2350895c" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.681424 4884 scope.go:117] "RemoveContainer" containerID="dd3c3fcdf3aec789a9e7b6153b7bf3f8eaa0ccc30670ab97373412c67d5f4cd7" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.705509 4884 scope.go:117] "RemoveContainer" containerID="5a95d190e562709b097e62fe0f40779fc9e24384a8d1895edcab6d08bbb14472" Nov 28 15:51:27 crc kubenswrapper[4884]: I1128 15:51:27.721565 4884 scope.go:117] "RemoveContainer" containerID="fcaf2c66276780f82ef88c966fdd309cd66bab9591912e6c0898a544d1dd5113" Nov 28 15:53:21 crc kubenswrapper[4884]: I1128 15:53:21.243154 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:53:21 crc kubenswrapper[4884]: I1128 15:53:21.243972 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:53:51 crc kubenswrapper[4884]: I1128 15:53:51.242796 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:53:51 crc kubenswrapper[4884]: I1128 15:53:51.243344 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:54:21 crc kubenswrapper[4884]: I1128 15:54:21.242894 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:54:21 crc kubenswrapper[4884]: I1128 15:54:21.243606 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:54:21 crc kubenswrapper[4884]: I1128 15:54:21.243684 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:54:21 crc kubenswrapper[4884]: I1128 15:54:21.244640 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eeae40e64fd19266154157110c0d8142895dcd3bef27cbebfe47b0a46e4d2908"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:54:21 crc kubenswrapper[4884]: I1128 15:54:21.244771 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://eeae40e64fd19266154157110c0d8142895dcd3bef27cbebfe47b0a46e4d2908" gracePeriod=600 Nov 28 15:54:22 crc kubenswrapper[4884]: I1128 15:54:22.383130 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="eeae40e64fd19266154157110c0d8142895dcd3bef27cbebfe47b0a46e4d2908" exitCode=0 Nov 28 15:54:22 crc kubenswrapper[4884]: I1128 15:54:22.383241 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"eeae40e64fd19266154157110c0d8142895dcd3bef27cbebfe47b0a46e4d2908"} Nov 28 15:54:22 crc kubenswrapper[4884]: I1128 15:54:22.383811 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968"} Nov 28 15:54:22 crc kubenswrapper[4884]: I1128 15:54:22.383846 4884 scope.go:117] "RemoveContainer" containerID="4a9199e43a0bb82dfcb48ea75ec5239d064761b818496fa39d5eae1e5d70989d" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.891265 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8l7ct"] Nov 28 15:54:48 crc kubenswrapper[4884]: E1128 15:54:48.892424 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="registry-server" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.892445 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="registry-server" Nov 28 15:54:48 crc kubenswrapper[4884]: E1128 15:54:48.892476 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="extract-content" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.892487 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="extract-content" Nov 28 15:54:48 crc kubenswrapper[4884]: E1128 15:54:48.892507 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="extract-utilities" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.892518 4884 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="extract-utilities" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.892752 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="46578fa5-689e-4dd7-a7aa-a0a7bee103ad" containerName="registry-server" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.894129 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.902856 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-catalog-content\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.903004 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4wfr\" (UniqueName: \"kubernetes.io/projected/efb4cdc8-f184-4aa2-96e6-8899eeadf962-kube-api-access-l4wfr\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.903281 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-utilities\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:48 crc kubenswrapper[4884]: I1128 15:54:48.910461 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8l7ct"] Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.004362 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4wfr\" (UniqueName: \"kubernetes.io/projected/efb4cdc8-f184-4aa2-96e6-8899eeadf962-kube-api-access-l4wfr\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.004732 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-utilities\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.004774 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-catalog-content\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.005658 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-catalog-content\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.006155 4884 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-utilities\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.023884 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4wfr\" (UniqueName: \"kubernetes.io/projected/efb4cdc8-f184-4aa2-96e6-8899eeadf962-kube-api-access-l4wfr\") pod \"redhat-operators-8l7ct\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.264285 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:49 crc kubenswrapper[4884]: I1128 15:54:49.691112 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8l7ct"] Nov 28 15:54:50 crc kubenswrapper[4884]: I1128 15:54:50.645978 4884 generic.go:334] "Generic (PLEG): container finished" podID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerID="1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05" exitCode=0 Nov 28 15:54:50 crc kubenswrapper[4884]: I1128 15:54:50.646086 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerDied","Data":"1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05"} Nov 28 15:54:50 crc kubenswrapper[4884]: I1128 15:54:50.646421 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerStarted","Data":"0a5ba183edb2a99f41e0c4d96b86d222523004fee2739837f4a5e2105a819565"} Nov 28 15:54:50 crc kubenswrapper[4884]: I1128 15:54:50.649292 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:54:52 crc kubenswrapper[4884]: I1128 15:54:52.669960 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerStarted","Data":"04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214"} Nov 28 15:54:53 crc kubenswrapper[4884]: I1128 15:54:53.678029 4884 generic.go:334] "Generic (PLEG): container finished" podID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerID="04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214" exitCode=0 Nov 28 15:54:53 crc kubenswrapper[4884]: I1128 15:54:53.678629 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerDied","Data":"04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214"} Nov 28 15:54:54 crc kubenswrapper[4884]: I1128 15:54:54.685710 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerStarted","Data":"e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc"} Nov 28 15:54:54 crc kubenswrapper[4884]: I1128 15:54:54.708734 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8l7ct" podStartSLOduration=3.293681266 podStartE2EDuration="6.708714773s" 
podCreationTimestamp="2025-11-28 15:54:48 +0000 UTC" firstStartedPulling="2025-11-28 15:54:50.64866688 +0000 UTC m=+2130.211450731" lastFinishedPulling="2025-11-28 15:54:54.063700437 +0000 UTC m=+2133.626484238" observedRunningTime="2025-11-28 15:54:54.705337691 +0000 UTC m=+2134.268121502" watchObservedRunningTime="2025-11-28 15:54:54.708714773 +0000 UTC m=+2134.271498574" Nov 28 15:54:59 crc kubenswrapper[4884]: I1128 15:54:59.264680 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:54:59 crc kubenswrapper[4884]: I1128 15:54:59.265452 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:55:00 crc kubenswrapper[4884]: I1128 15:55:00.305748 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8l7ct" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="registry-server" probeResult="failure" output=< Nov 28 15:55:00 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 15:55:00 crc kubenswrapper[4884]: > Nov 28 15:55:09 crc kubenswrapper[4884]: I1128 15:55:09.342532 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:55:09 crc kubenswrapper[4884]: I1128 15:55:09.427038 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:55:09 crc kubenswrapper[4884]: I1128 15:55:09.595700 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8l7ct"] Nov 28 15:55:10 crc kubenswrapper[4884]: I1128 15:55:10.817949 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8l7ct" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="registry-server" containerID="cri-o://e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc" gracePeriod=2 Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.322628 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.462879 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4wfr\" (UniqueName: \"kubernetes.io/projected/efb4cdc8-f184-4aa2-96e6-8899eeadf962-kube-api-access-l4wfr\") pod \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.462980 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-catalog-content\") pod \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.463201 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-utilities\") pod \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\" (UID: \"efb4cdc8-f184-4aa2-96e6-8899eeadf962\") " Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.465076 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-utilities" (OuterVolumeSpecName: "utilities") pod "efb4cdc8-f184-4aa2-96e6-8899eeadf962" (UID: "efb4cdc8-f184-4aa2-96e6-8899eeadf962"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.472926 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efb4cdc8-f184-4aa2-96e6-8899eeadf962-kube-api-access-l4wfr" (OuterVolumeSpecName: "kube-api-access-l4wfr") pod "efb4cdc8-f184-4aa2-96e6-8899eeadf962" (UID: "efb4cdc8-f184-4aa2-96e6-8899eeadf962"). InnerVolumeSpecName "kube-api-access-l4wfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.566982 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.567032 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4wfr\" (UniqueName: \"kubernetes.io/projected/efb4cdc8-f184-4aa2-96e6-8899eeadf962-kube-api-access-l4wfr\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.637321 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "efb4cdc8-f184-4aa2-96e6-8899eeadf962" (UID: "efb4cdc8-f184-4aa2-96e6-8899eeadf962"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.668764 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/efb4cdc8-f184-4aa2-96e6-8899eeadf962-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.826872 4884 generic.go:334] "Generic (PLEG): container finished" podID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerID="e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc" exitCode=0 Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.826934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerDied","Data":"e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc"} Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.826972 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8l7ct" event={"ID":"efb4cdc8-f184-4aa2-96e6-8899eeadf962","Type":"ContainerDied","Data":"0a5ba183edb2a99f41e0c4d96b86d222523004fee2739837f4a5e2105a819565"} Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.827002 4884 scope.go:117] "RemoveContainer" containerID="e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.826938 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8l7ct" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.859236 4884 scope.go:117] "RemoveContainer" containerID="04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.863375 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8l7ct"] Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.870278 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8l7ct"] Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.882260 4884 scope.go:117] "RemoveContainer" containerID="1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.904611 4884 scope.go:117] "RemoveContainer" containerID="e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc" Nov 28 15:55:11 crc kubenswrapper[4884]: E1128 15:55:11.905106 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc\": container with ID starting with e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc not found: ID does not exist" containerID="e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.905200 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc"} err="failed to get container status \"e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc\": rpc error: code = NotFound desc = could not find container \"e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc\": container with ID starting with e2ecc7682aeb2118b812952552ab77bf1c2f6c43be4a66d7676d79740c0d9bbc not found: ID does not exist" Nov 28 15:55:11 crc 
kubenswrapper[4884]: I1128 15:55:11.905271 4884 scope.go:117] "RemoveContainer" containerID="04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214" Nov 28 15:55:11 crc kubenswrapper[4884]: E1128 15:55:11.905573 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214\": container with ID starting with 04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214 not found: ID does not exist" containerID="04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.905613 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214"} err="failed to get container status \"04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214\": rpc error: code = NotFound desc = could not find container \"04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214\": container with ID starting with 04a06aff6360fd9d150ab51be18b68fa58d32b2a1d0d6477ad3d90595c090214 not found: ID does not exist" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.905643 4884 scope.go:117] "RemoveContainer" containerID="1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05" Nov 28 15:55:11 crc kubenswrapper[4884]: E1128 15:55:11.905901 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05\": container with ID starting with 1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05 not found: ID does not exist" containerID="1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05" Nov 28 15:55:11 crc kubenswrapper[4884]: I1128 15:55:11.905979 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05"} err="failed to get container status \"1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05\": rpc error: code = NotFound desc = could not find container \"1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05\": container with ID starting with 1285ce7e28a4f882f469900b44badd9be3584425a5d09059c6d748323eed9b05 not found: ID does not exist" Nov 28 15:55:12 crc kubenswrapper[4884]: I1128 15:55:12.697051 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" path="/var/lib/kubelet/pods/efb4cdc8-f184-4aa2-96e6-8899eeadf962/volumes" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.835405 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cmc5q"] Nov 28 15:55:39 crc kubenswrapper[4884]: E1128 15:55:39.836301 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="extract-content" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.836317 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="extract-content" Nov 28 15:55:39 crc kubenswrapper[4884]: E1128 15:55:39.836336 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="extract-utilities" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.836345 4884 
state_mem.go:107] "Deleted CPUSet assignment" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="extract-utilities" Nov 28 15:55:39 crc kubenswrapper[4884]: E1128 15:55:39.836371 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="registry-server" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.836379 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="registry-server" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.836563 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="efb4cdc8-f184-4aa2-96e6-8899eeadf962" containerName="registry-server" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.837651 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:39 crc kubenswrapper[4884]: I1128 15:55:39.859597 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cmc5q"] Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.008569 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-utilities\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.008663 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jwjh\" (UniqueName: \"kubernetes.io/projected/9a52003e-697a-43df-86ce-75f4e0ca5f5d-kube-api-access-2jwjh\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.008829 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-catalog-content\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.110233 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jwjh\" (UniqueName: \"kubernetes.io/projected/9a52003e-697a-43df-86ce-75f4e0ca5f5d-kube-api-access-2jwjh\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.110287 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-catalog-content\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.110366 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-utilities\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc 
kubenswrapper[4884]: I1128 15:55:40.110811 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-utilities\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.110872 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-catalog-content\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.135151 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jwjh\" (UniqueName: \"kubernetes.io/projected/9a52003e-697a-43df-86ce-75f4e0ca5f5d-kube-api-access-2jwjh\") pod \"community-operators-cmc5q\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.168271 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:40 crc kubenswrapper[4884]: I1128 15:55:40.658018 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cmc5q"] Nov 28 15:55:41 crc kubenswrapper[4884]: I1128 15:55:41.109432 4884 generic.go:334] "Generic (PLEG): container finished" podID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerID="bd66d7d378fda6ed0f43d8250c8ff09a681b20df7fff0671483423b2cf83d156" exitCode=0 Nov 28 15:55:41 crc kubenswrapper[4884]: I1128 15:55:41.109554 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerDied","Data":"bd66d7d378fda6ed0f43d8250c8ff09a681b20df7fff0671483423b2cf83d156"} Nov 28 15:55:41 crc kubenswrapper[4884]: I1128 15:55:41.109729 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerStarted","Data":"e729f1743e73332fa8e70f8114777030c94faf5f084357ff85d35dc66ada8ce6"} Nov 28 15:55:42 crc kubenswrapper[4884]: I1128 15:55:42.141550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerStarted","Data":"e50f37885f05930e2db40cfb2f2f3b42193e080ae8960e5b656bb4f0bda79f3a"} Nov 28 15:55:43 crc kubenswrapper[4884]: I1128 15:55:43.150369 4884 generic.go:334] "Generic (PLEG): container finished" podID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerID="e50f37885f05930e2db40cfb2f2f3b42193e080ae8960e5b656bb4f0bda79f3a" exitCode=0 Nov 28 15:55:43 crc kubenswrapper[4884]: I1128 15:55:43.150425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerDied","Data":"e50f37885f05930e2db40cfb2f2f3b42193e080ae8960e5b656bb4f0bda79f3a"} Nov 28 15:55:45 crc kubenswrapper[4884]: I1128 15:55:45.168694 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" 
event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerStarted","Data":"3f4e065b0a7144b125482e877b5286f02d120cd4650f9eda5505142840aea277"} Nov 28 15:55:45 crc kubenswrapper[4884]: I1128 15:55:45.199547 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cmc5q" podStartSLOduration=2.531771782 podStartE2EDuration="6.199528464s" podCreationTimestamp="2025-11-28 15:55:39 +0000 UTC" firstStartedPulling="2025-11-28 15:55:41.110852878 +0000 UTC m=+2180.673636689" lastFinishedPulling="2025-11-28 15:55:44.77860953 +0000 UTC m=+2184.341393371" observedRunningTime="2025-11-28 15:55:45.195342072 +0000 UTC m=+2184.758125933" watchObservedRunningTime="2025-11-28 15:55:45.199528464 +0000 UTC m=+2184.762312265" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.469313 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j64q7"] Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.471950 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.527837 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j64q7"] Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.556424 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-utilities\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.556501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-catalog-content\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.556578 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7x2k\" (UniqueName: \"kubernetes.io/projected/57b2f38e-7bc4-433a-bbcb-55de26659d92-kube-api-access-p7x2k\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.657645 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-utilities\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.657727 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-catalog-content\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.657751 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7x2k\" (UniqueName: 
\"kubernetes.io/projected/57b2f38e-7bc4-433a-bbcb-55de26659d92-kube-api-access-p7x2k\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.658196 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-utilities\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.658300 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-catalog-content\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.676355 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7x2k\" (UniqueName: \"kubernetes.io/projected/57b2f38e-7bc4-433a-bbcb-55de26659d92-kube-api-access-p7x2k\") pod \"redhat-marketplace-j64q7\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:49 crc kubenswrapper[4884]: I1128 15:55:49.804586 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:50 crc kubenswrapper[4884]: I1128 15:55:50.169483 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:50 crc kubenswrapper[4884]: I1128 15:55:50.169837 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:50 crc kubenswrapper[4884]: I1128 15:55:50.253469 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:50 crc kubenswrapper[4884]: I1128 15:55:50.289648 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j64q7"] Nov 28 15:55:50 crc kubenswrapper[4884]: I1128 15:55:50.323713 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:51 crc kubenswrapper[4884]: I1128 15:55:51.238826 4884 generic.go:334] "Generic (PLEG): container finished" podID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerID="c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064" exitCode=0 Nov 28 15:55:51 crc kubenswrapper[4884]: I1128 15:55:51.239009 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j64q7" event={"ID":"57b2f38e-7bc4-433a-bbcb-55de26659d92","Type":"ContainerDied","Data":"c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064"} Nov 28 15:55:51 crc kubenswrapper[4884]: I1128 15:55:51.240298 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j64q7" event={"ID":"57b2f38e-7bc4-433a-bbcb-55de26659d92","Type":"ContainerStarted","Data":"34d5013bac05f3412b114c70f68dce9ba89b0b6049f33b1018c2ca98cc32257d"} Nov 28 15:55:52 crc kubenswrapper[4884]: I1128 15:55:52.643939 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-cmc5q"] Nov 28 15:55:52 crc kubenswrapper[4884]: I1128 15:55:52.644645 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cmc5q" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="registry-server" containerID="cri-o://3f4e065b0a7144b125482e877b5286f02d120cd4650f9eda5505142840aea277" gracePeriod=2 Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.262063 4884 generic.go:334] "Generic (PLEG): container finished" podID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerID="a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d" exitCode=0 Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.262185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j64q7" event={"ID":"57b2f38e-7bc4-433a-bbcb-55de26659d92","Type":"ContainerDied","Data":"a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d"} Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.269253 4884 generic.go:334] "Generic (PLEG): container finished" podID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerID="3f4e065b0a7144b125482e877b5286f02d120cd4650f9eda5505142840aea277" exitCode=0 Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.269308 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerDied","Data":"3f4e065b0a7144b125482e877b5286f02d120cd4650f9eda5505142840aea277"} Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.694887 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.827262 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-catalog-content\") pod \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.827399 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-utilities\") pod \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.827428 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jwjh\" (UniqueName: \"kubernetes.io/projected/9a52003e-697a-43df-86ce-75f4e0ca5f5d-kube-api-access-2jwjh\") pod \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\" (UID: \"9a52003e-697a-43df-86ce-75f4e0ca5f5d\") " Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.830016 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-utilities" (OuterVolumeSpecName: "utilities") pod "9a52003e-697a-43df-86ce-75f4e0ca5f5d" (UID: "9a52003e-697a-43df-86ce-75f4e0ca5f5d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.836970 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a52003e-697a-43df-86ce-75f4e0ca5f5d-kube-api-access-2jwjh" (OuterVolumeSpecName: "kube-api-access-2jwjh") pod "9a52003e-697a-43df-86ce-75f4e0ca5f5d" (UID: "9a52003e-697a-43df-86ce-75f4e0ca5f5d"). InnerVolumeSpecName "kube-api-access-2jwjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.879681 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a52003e-697a-43df-86ce-75f4e0ca5f5d" (UID: "9a52003e-697a-43df-86ce-75f4e0ca5f5d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.929606 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.929642 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a52003e-697a-43df-86ce-75f4e0ca5f5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:53 crc kubenswrapper[4884]: I1128 15:55:53.929656 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jwjh\" (UniqueName: \"kubernetes.io/projected/9a52003e-697a-43df-86ce-75f4e0ca5f5d-kube-api-access-2jwjh\") on node \"crc\" DevicePath \"\"" Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.294818 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j64q7" event={"ID":"57b2f38e-7bc4-433a-bbcb-55de26659d92","Type":"ContainerStarted","Data":"81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac"} Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.303600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cmc5q" event={"ID":"9a52003e-697a-43df-86ce-75f4e0ca5f5d","Type":"ContainerDied","Data":"e729f1743e73332fa8e70f8114777030c94faf5f084357ff85d35dc66ada8ce6"} Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.303667 4884 scope.go:117] "RemoveContainer" containerID="3f4e065b0a7144b125482e877b5286f02d120cd4650f9eda5505142840aea277" Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.303663 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cmc5q" Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.331664 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j64q7" podStartSLOduration=2.8030823849999997 podStartE2EDuration="5.331645452s" podCreationTimestamp="2025-11-28 15:55:49 +0000 UTC" firstStartedPulling="2025-11-28 15:55:51.242805033 +0000 UTC m=+2190.805588864" lastFinishedPulling="2025-11-28 15:55:53.77136812 +0000 UTC m=+2193.334151931" observedRunningTime="2025-11-28 15:55:54.325455472 +0000 UTC m=+2193.888239304" watchObservedRunningTime="2025-11-28 15:55:54.331645452 +0000 UTC m=+2193.894429263" Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.343351 4884 scope.go:117] "RemoveContainer" containerID="e50f37885f05930e2db40cfb2f2f3b42193e080ae8960e5b656bb4f0bda79f3a" Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.352896 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cmc5q"] Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.362572 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cmc5q"] Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.386940 4884 scope.go:117] "RemoveContainer" containerID="bd66d7d378fda6ed0f43d8250c8ff09a681b20df7fff0671483423b2cf83d156" Nov 28 15:55:54 crc kubenswrapper[4884]: I1128 15:55:54.701736 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" path="/var/lib/kubelet/pods/9a52003e-697a-43df-86ce-75f4e0ca5f5d/volumes" Nov 28 15:55:59 crc kubenswrapper[4884]: I1128 15:55:59.805070 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:59 crc kubenswrapper[4884]: I1128 15:55:59.805833 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:55:59 crc kubenswrapper[4884]: I1128 15:55:59.871755 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:56:00 crc kubenswrapper[4884]: I1128 15:56:00.432631 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:56:00 crc kubenswrapper[4884]: I1128 15:56:00.516290 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j64q7"] Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.381448 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j64q7" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="registry-server" containerID="cri-o://81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac" gracePeriod=2 Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.773206 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.873409 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-catalog-content\") pod \"57b2f38e-7bc4-433a-bbcb-55de26659d92\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.873481 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7x2k\" (UniqueName: \"kubernetes.io/projected/57b2f38e-7bc4-433a-bbcb-55de26659d92-kube-api-access-p7x2k\") pod \"57b2f38e-7bc4-433a-bbcb-55de26659d92\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.873524 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-utilities\") pod \"57b2f38e-7bc4-433a-bbcb-55de26659d92\" (UID: \"57b2f38e-7bc4-433a-bbcb-55de26659d92\") " Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.874610 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-utilities" (OuterVolumeSpecName: "utilities") pod "57b2f38e-7bc4-433a-bbcb-55de26659d92" (UID: "57b2f38e-7bc4-433a-bbcb-55de26659d92"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.882893 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57b2f38e-7bc4-433a-bbcb-55de26659d92-kube-api-access-p7x2k" (OuterVolumeSpecName: "kube-api-access-p7x2k") pod "57b2f38e-7bc4-433a-bbcb-55de26659d92" (UID: "57b2f38e-7bc4-433a-bbcb-55de26659d92"). InnerVolumeSpecName "kube-api-access-p7x2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.892069 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57b2f38e-7bc4-433a-bbcb-55de26659d92" (UID: "57b2f38e-7bc4-433a-bbcb-55de26659d92"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.975874 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.975958 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7x2k\" (UniqueName: \"kubernetes.io/projected/57b2f38e-7bc4-433a-bbcb-55de26659d92-kube-api-access-p7x2k\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:02 crc kubenswrapper[4884]: I1128 15:56:02.975990 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57b2f38e-7bc4-433a-bbcb-55de26659d92-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.395200 4884 generic.go:334] "Generic (PLEG): container finished" podID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerID="81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac" exitCode=0 Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.395288 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j64q7" event={"ID":"57b2f38e-7bc4-433a-bbcb-55de26659d92","Type":"ContainerDied","Data":"81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac"} Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.395781 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j64q7" event={"ID":"57b2f38e-7bc4-433a-bbcb-55de26659d92","Type":"ContainerDied","Data":"34d5013bac05f3412b114c70f68dce9ba89b0b6049f33b1018c2ca98cc32257d"} Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.395293 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j64q7" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.395878 4884 scope.go:117] "RemoveContainer" containerID="81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.432502 4884 scope.go:117] "RemoveContainer" containerID="a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.512144 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j64q7"] Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.514443 4884 scope.go:117] "RemoveContainer" containerID="c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.536379 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j64q7"] Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.551987 4884 scope.go:117] "RemoveContainer" containerID="81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac" Nov 28 15:56:03 crc kubenswrapper[4884]: E1128 15:56:03.562269 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac\": container with ID starting with 81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac not found: ID does not exist" containerID="81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.562367 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac"} err="failed to get container status \"81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac\": rpc error: code = NotFound desc = could not find container \"81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac\": container with ID starting with 81d9f4b35d8a29d37ad6c3a17c8ae73a54a0067eb8f7be3b8b60d3a6591032ac not found: ID does not exist" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.562400 4884 scope.go:117] "RemoveContainer" containerID="a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d" Nov 28 15:56:03 crc kubenswrapper[4884]: E1128 15:56:03.564303 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d\": container with ID starting with a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d not found: ID does not exist" containerID="a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.564408 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d"} err="failed to get container status \"a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d\": rpc error: code = NotFound desc = could not find container \"a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d\": container with ID starting with a2844617d90d0e5289bd791b6a5a9bcfe66c1d1a79652f5d9a20d94b5af7e56d not found: ID does not exist" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.564439 4884 scope.go:117] "RemoveContainer" 
containerID="c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064" Nov 28 15:56:03 crc kubenswrapper[4884]: E1128 15:56:03.570306 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064\": container with ID starting with c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064 not found: ID does not exist" containerID="c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064" Nov 28 15:56:03 crc kubenswrapper[4884]: I1128 15:56:03.570361 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064"} err="failed to get container status \"c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064\": rpc error: code = NotFound desc = could not find container \"c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064\": container with ID starting with c3953a3c650128e1522c17d95cacfe97d2d9ab291154ffa6667af55563a1e064 not found: ID does not exist" Nov 28 15:56:04 crc kubenswrapper[4884]: I1128 15:56:04.705888 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" path="/var/lib/kubelet/pods/57b2f38e-7bc4-433a-bbcb-55de26659d92/volumes" Nov 28 15:56:21 crc kubenswrapper[4884]: I1128 15:56:21.242818 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:56:21 crc kubenswrapper[4884]: I1128 15:56:21.243587 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:56:51 crc kubenswrapper[4884]: I1128 15:56:51.243050 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:56:51 crc kubenswrapper[4884]: I1128 15:56:51.243890 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:57:21 crc kubenswrapper[4884]: I1128 15:57:21.242765 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:57:21 crc kubenswrapper[4884]: I1128 15:57:21.243579 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:57:21 crc kubenswrapper[4884]: I1128 15:57:21.243666 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 15:57:21 crc kubenswrapper[4884]: I1128 15:57:21.244874 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:57:21 crc kubenswrapper[4884]: I1128 15:57:21.245016 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" gracePeriod=600 Nov 28 15:57:21 crc kubenswrapper[4884]: E1128 15:57:21.372296 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:57:22 crc kubenswrapper[4884]: I1128 15:57:22.097893 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" exitCode=0 Nov 28 15:57:22 crc kubenswrapper[4884]: I1128 15:57:22.097956 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968"} Nov 28 15:57:22 crc kubenswrapper[4884]: I1128 15:57:22.098048 4884 scope.go:117] "RemoveContainer" containerID="eeae40e64fd19266154157110c0d8142895dcd3bef27cbebfe47b0a46e4d2908" Nov 28 15:57:22 crc kubenswrapper[4884]: I1128 15:57:22.098648 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:57:22 crc kubenswrapper[4884]: E1128 15:57:22.099224 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:57:33 crc kubenswrapper[4884]: I1128 15:57:33.689024 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:57:33 crc kubenswrapper[4884]: E1128 15:57:33.689974 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:57:46 crc kubenswrapper[4884]: I1128 15:57:46.688811 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:57:46 crc kubenswrapper[4884]: E1128 15:57:46.689496 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:57:59 crc kubenswrapper[4884]: I1128 15:57:59.688738 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:57:59 crc kubenswrapper[4884]: E1128 15:57:59.689846 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:58:11 crc kubenswrapper[4884]: I1128 15:58:11.688543 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:58:11 crc kubenswrapper[4884]: E1128 15:58:11.691288 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:58:24 crc kubenswrapper[4884]: I1128 15:58:24.688751 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:58:24 crc kubenswrapper[4884]: E1128 15:58:24.690623 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:58:39 crc kubenswrapper[4884]: I1128 15:58:39.688701 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:58:39 crc kubenswrapper[4884]: E1128 15:58:39.689710 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:58:51 crc kubenswrapper[4884]: I1128 15:58:51.689377 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:58:51 crc kubenswrapper[4884]: E1128 15:58:51.690487 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:59:06 crc kubenswrapper[4884]: I1128 15:59:06.688278 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:59:06 crc kubenswrapper[4884]: E1128 15:59:06.688990 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:59:19 crc kubenswrapper[4884]: I1128 15:59:19.688804 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:59:19 crc kubenswrapper[4884]: E1128 15:59:19.691333 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:59:30 crc kubenswrapper[4884]: I1128 15:59:30.693662 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:59:30 crc kubenswrapper[4884]: E1128 15:59:30.695009 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 15:59:45 crc kubenswrapper[4884]: I1128 15:59:45.689482 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 15:59:45 crc kubenswrapper[4884]: E1128 15:59:45.690488 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.151071 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq"] Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.152075 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="extract-content" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152115 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="extract-content" Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.152132 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="extract-utilities" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152140 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="extract-utilities" Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.152162 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="extract-utilities" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152173 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="extract-utilities" Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.152198 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="registry-server" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152206 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="registry-server" Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.152220 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="extract-content" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152228 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="extract-content" Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.152240 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="registry-server" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152248 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="registry-server" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152422 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a52003e-697a-43df-86ce-75f4e0ca5f5d" containerName="registry-server" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.152444 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="57b2f38e-7bc4-433a-bbcb-55de26659d92" containerName="registry-server" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.153001 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.156511 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.158562 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.170229 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq"] Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.275547 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-config-volume\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.275646 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-secret-volume\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.275782 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zsd2\" (UniqueName: \"kubernetes.io/projected/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-kube-api-access-2zsd2\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.377623 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-config-volume\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.377696 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-secret-volume\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.377782 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zsd2\" (UniqueName: \"kubernetes.io/projected/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-kube-api-access-2zsd2\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.379441 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-config-volume\") pod 
\"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.392116 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-secret-volume\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.408442 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zsd2\" (UniqueName: \"kubernetes.io/projected/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-kube-api-access-2zsd2\") pod \"collect-profiles-29405760-254vq\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.477004 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.696774 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:00:00 crc kubenswrapper[4884]: E1128 16:00:00.697122 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:00:00 crc kubenswrapper[4884]: I1128 16:00:00.913033 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq"] Nov 28 16:00:01 crc kubenswrapper[4884]: I1128 16:00:01.539129 4884 generic.go:334] "Generic (PLEG): container finished" podID="45e5bbc2-2cae-43c5-ae25-714a75aac5ff" containerID="bdf130558afcdca20368ab6b3a1762b9f15162dea3f32e4a7777dd3717c91cac" exitCode=0 Nov 28 16:00:01 crc kubenswrapper[4884]: I1128 16:00:01.539230 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" event={"ID":"45e5bbc2-2cae-43c5-ae25-714a75aac5ff","Type":"ContainerDied","Data":"bdf130558afcdca20368ab6b3a1762b9f15162dea3f32e4a7777dd3717c91cac"} Nov 28 16:00:01 crc kubenswrapper[4884]: I1128 16:00:01.539485 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" event={"ID":"45e5bbc2-2cae-43c5-ae25-714a75aac5ff","Type":"ContainerStarted","Data":"24c7d9faf997f1c189d7bf7973671bda6739af90336f31423178f4def291fa5b"} Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.782867 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.914902 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-secret-volume\") pod \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.915018 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-config-volume\") pod \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.915050 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zsd2\" (UniqueName: \"kubernetes.io/projected/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-kube-api-access-2zsd2\") pod \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\" (UID: \"45e5bbc2-2cae-43c5-ae25-714a75aac5ff\") " Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.916215 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-config-volume" (OuterVolumeSpecName: "config-volume") pod "45e5bbc2-2cae-43c5-ae25-714a75aac5ff" (UID: "45e5bbc2-2cae-43c5-ae25-714a75aac5ff"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.922517 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-kube-api-access-2zsd2" (OuterVolumeSpecName: "kube-api-access-2zsd2") pod "45e5bbc2-2cae-43c5-ae25-714a75aac5ff" (UID: "45e5bbc2-2cae-43c5-ae25-714a75aac5ff"). InnerVolumeSpecName "kube-api-access-2zsd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:02 crc kubenswrapper[4884]: I1128 16:00:02.922887 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "45e5bbc2-2cae-43c5-ae25-714a75aac5ff" (UID: "45e5bbc2-2cae-43c5-ae25-714a75aac5ff"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.017811 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.017882 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.017909 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zsd2\" (UniqueName: \"kubernetes.io/projected/45e5bbc2-2cae-43c5-ae25-714a75aac5ff-kube-api-access-2zsd2\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.555226 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" event={"ID":"45e5bbc2-2cae-43c5-ae25-714a75aac5ff","Type":"ContainerDied","Data":"24c7d9faf997f1c189d7bf7973671bda6739af90336f31423178f4def291fa5b"} Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.555718 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24c7d9faf997f1c189d7bf7973671bda6739af90336f31423178f4def291fa5b" Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.555376 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq" Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.889424 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v"] Nov 28 16:00:03 crc kubenswrapper[4884]: I1128 16:00:03.900337 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-lm54v"] Nov 28 16:00:04 crc kubenswrapper[4884]: I1128 16:00:04.697021 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd32abac-bba6-4b9e-bf45-60afad5b0e9e" path="/var/lib/kubelet/pods/bd32abac-bba6-4b9e-bf45-60afad5b0e9e/volumes" Nov 28 16:00:13 crc kubenswrapper[4884]: I1128 16:00:13.688887 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:00:13 crc kubenswrapper[4884]: E1128 16:00:13.689936 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:00:27 crc kubenswrapper[4884]: I1128 16:00:27.689178 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:00:27 crc kubenswrapper[4884]: E1128 16:00:27.690388 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:00:28 crc kubenswrapper[4884]: I1128 16:00:28.072555 4884 scope.go:117] "RemoveContainer" containerID="22a5a3e84ca91cb76bdf3e2031cab324b8aa7d47c5e5bd47d1f01ec1ac2aa42c" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.592654 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xfdvg"] Nov 28 16:00:33 crc kubenswrapper[4884]: E1128 16:00:33.593586 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e5bbc2-2cae-43c5-ae25-714a75aac5ff" containerName="collect-profiles" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.593600 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e5bbc2-2cae-43c5-ae25-714a75aac5ff" containerName="collect-profiles" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.595130 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e5bbc2-2cae-43c5-ae25-714a75aac5ff" containerName="collect-profiles" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.596154 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.624146 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xfdvg"] Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.714648 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75rlr\" (UniqueName: \"kubernetes.io/projected/a28e9e10-8083-4a09-b8da-a40b4c0ec864-kube-api-access-75rlr\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.714770 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-catalog-content\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.714836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-utilities\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.816689 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75rlr\" (UniqueName: \"kubernetes.io/projected/a28e9e10-8083-4a09-b8da-a40b4c0ec864-kube-api-access-75rlr\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.816803 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-catalog-content\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.816889 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-utilities\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.817457 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-catalog-content\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.817974 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-utilities\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.841955 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75rlr\" (UniqueName: \"kubernetes.io/projected/a28e9e10-8083-4a09-b8da-a40b4c0ec864-kube-api-access-75rlr\") pod \"certified-operators-xfdvg\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:33 crc kubenswrapper[4884]: I1128 16:00:33.936058 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:34 crc kubenswrapper[4884]: I1128 16:00:34.415915 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xfdvg"] Nov 28 16:00:34 crc kubenswrapper[4884]: I1128 16:00:34.869496 4884 generic.go:334] "Generic (PLEG): container finished" podID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerID="ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b" exitCode=0 Nov 28 16:00:34 crc kubenswrapper[4884]: I1128 16:00:34.869566 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerDied","Data":"ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b"} Nov 28 16:00:34 crc kubenswrapper[4884]: I1128 16:00:34.869938 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerStarted","Data":"13a15416cefadaebf225309325e034f35e255a516105dfbbc04a7e98b7184f3d"} Nov 28 16:00:34 crc kubenswrapper[4884]: I1128 16:00:34.872585 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:00:35 crc kubenswrapper[4884]: I1128 16:00:35.887015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerStarted","Data":"70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05"} Nov 28 16:00:36 crc kubenswrapper[4884]: I1128 16:00:36.904191 4884 generic.go:334] "Generic (PLEG): container finished" podID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerID="70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05" exitCode=0 Nov 28 16:00:36 crc kubenswrapper[4884]: I1128 
16:00:36.904259 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerDied","Data":"70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05"} Nov 28 16:00:39 crc kubenswrapper[4884]: I1128 16:00:39.930029 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerStarted","Data":"7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e"} Nov 28 16:00:39 crc kubenswrapper[4884]: I1128 16:00:39.953944 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xfdvg" podStartSLOduration=2.73867625 podStartE2EDuration="6.953923771s" podCreationTimestamp="2025-11-28 16:00:33 +0000 UTC" firstStartedPulling="2025-11-28 16:00:34.872197534 +0000 UTC m=+2474.434981375" lastFinishedPulling="2025-11-28 16:00:39.087445065 +0000 UTC m=+2478.650228896" observedRunningTime="2025-11-28 16:00:39.94733222 +0000 UTC m=+2479.510116041" watchObservedRunningTime="2025-11-28 16:00:39.953923771 +0000 UTC m=+2479.516707572" Nov 28 16:00:41 crc kubenswrapper[4884]: I1128 16:00:41.688014 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:00:41 crc kubenswrapper[4884]: E1128 16:00:41.689532 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:00:43 crc kubenswrapper[4884]: I1128 16:00:43.936220 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:43 crc kubenswrapper[4884]: I1128 16:00:43.936278 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:43 crc kubenswrapper[4884]: I1128 16:00:43.981947 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:53 crc kubenswrapper[4884]: I1128 16:00:53.993254 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:54 crc kubenswrapper[4884]: I1128 16:00:54.053818 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xfdvg"] Nov 28 16:00:54 crc kubenswrapper[4884]: I1128 16:00:54.054063 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xfdvg" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="registry-server" containerID="cri-o://7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e" gracePeriod=2 Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.011767 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.049192 4884 generic.go:334] "Generic (PLEG): container finished" podID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerID="7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e" exitCode=0 Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.049229 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xfdvg" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.049243 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerDied","Data":"7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e"} Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.049562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xfdvg" event={"ID":"a28e9e10-8083-4a09-b8da-a40b4c0ec864","Type":"ContainerDied","Data":"13a15416cefadaebf225309325e034f35e255a516105dfbbc04a7e98b7184f3d"} Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.049578 4884 scope.go:117] "RemoveContainer" containerID="7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.086511 4884 scope.go:117] "RemoveContainer" containerID="70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.108587 4884 scope.go:117] "RemoveContainer" containerID="ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.134146 4884 scope.go:117] "RemoveContainer" containerID="7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e" Nov 28 16:00:55 crc kubenswrapper[4884]: E1128 16:00:55.134869 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e\": container with ID starting with 7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e not found: ID does not exist" containerID="7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.134925 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e"} err="failed to get container status \"7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e\": rpc error: code = NotFound desc = could not find container \"7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e\": container with ID starting with 7db7a4bd28f64d5522b35f2e70df587aa8cb742fa7bad43303e192d265ac076e not found: ID does not exist" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.134962 4884 scope.go:117] "RemoveContainer" containerID="70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05" Nov 28 16:00:55 crc kubenswrapper[4884]: E1128 16:00:55.135409 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05\": container with ID starting with 70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05 not found: ID does not exist" 
containerID="70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.135456 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05"} err="failed to get container status \"70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05\": rpc error: code = NotFound desc = could not find container \"70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05\": container with ID starting with 70d56c8bf7ca3ada89f65adba8c37de9b73f7d0201789103b38bbf177d1c6e05 not found: ID does not exist" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.135482 4884 scope.go:117] "RemoveContainer" containerID="ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b" Nov 28 16:00:55 crc kubenswrapper[4884]: E1128 16:00:55.135928 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b\": container with ID starting with ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b not found: ID does not exist" containerID="ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.135986 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b"} err="failed to get container status \"ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b\": rpc error: code = NotFound desc = could not find container \"ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b\": container with ID starting with ff39ac01d2fe77660a66d815c9ea485b8df6bfedc7c00f8753ef5bc94cdece6b not found: ID does not exist" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.142271 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-utilities\") pod \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.142407 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-catalog-content\") pod \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.142467 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75rlr\" (UniqueName: \"kubernetes.io/projected/a28e9e10-8083-4a09-b8da-a40b4c0ec864-kube-api-access-75rlr\") pod \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\" (UID: \"a28e9e10-8083-4a09-b8da-a40b4c0ec864\") " Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.144030 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-utilities" (OuterVolumeSpecName: "utilities") pod "a28e9e10-8083-4a09-b8da-a40b4c0ec864" (UID: "a28e9e10-8083-4a09-b8da-a40b4c0ec864"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.152589 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a28e9e10-8083-4a09-b8da-a40b4c0ec864-kube-api-access-75rlr" (OuterVolumeSpecName: "kube-api-access-75rlr") pod "a28e9e10-8083-4a09-b8da-a40b4c0ec864" (UID: "a28e9e10-8083-4a09-b8da-a40b4c0ec864"). InnerVolumeSpecName "kube-api-access-75rlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.232833 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a28e9e10-8083-4a09-b8da-a40b4c0ec864" (UID: "a28e9e10-8083-4a09-b8da-a40b4c0ec864"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.244672 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.244737 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75rlr\" (UniqueName: \"kubernetes.io/projected/a28e9e10-8083-4a09-b8da-a40b4c0ec864-kube-api-access-75rlr\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.244760 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a28e9e10-8083-4a09-b8da-a40b4c0ec864-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.397663 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xfdvg"] Nov 28 16:00:55 crc kubenswrapper[4884]: I1128 16:00:55.410539 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xfdvg"] Nov 28 16:00:56 crc kubenswrapper[4884]: I1128 16:00:56.689289 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:00:56 crc kubenswrapper[4884]: E1128 16:00:56.689745 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:00:56 crc kubenswrapper[4884]: I1128 16:00:56.705044 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" path="/var/lib/kubelet/pods/a28e9e10-8083-4a09-b8da-a40b4c0ec864/volumes" Nov 28 16:01:08 crc kubenswrapper[4884]: I1128 16:01:08.688913 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:01:08 crc kubenswrapper[4884]: E1128 16:01:08.690204 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:01:20 crc kubenswrapper[4884]: I1128 16:01:20.696765 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:01:20 crc kubenswrapper[4884]: E1128 16:01:20.697822 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:01:33 crc kubenswrapper[4884]: I1128 16:01:33.688823 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:01:33 crc kubenswrapper[4884]: E1128 16:01:33.689897 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:01:45 crc kubenswrapper[4884]: I1128 16:01:45.688455 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:01:45 crc kubenswrapper[4884]: E1128 16:01:45.689232 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:01:57 crc kubenswrapper[4884]: I1128 16:01:57.688418 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:01:57 crc kubenswrapper[4884]: E1128 16:01:57.689193 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:02:12 crc kubenswrapper[4884]: I1128 16:02:12.689054 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:02:12 crc kubenswrapper[4884]: E1128 16:02:12.689658 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:02:27 crc kubenswrapper[4884]: I1128 16:02:27.688125 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:02:28 crc kubenswrapper[4884]: I1128 16:02:28.909773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"7f99298659f1be858ec3b1dd152e125268ccd25702041569a13ed1b9ad071eab"} Nov 28 16:04:51 crc kubenswrapper[4884]: I1128 16:04:51.243423 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:04:51 crc kubenswrapper[4884]: I1128 16:04:51.244163 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:05:21 crc kubenswrapper[4884]: I1128 16:05:21.243471 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:05:21 crc kubenswrapper[4884]: I1128 16:05:21.243949 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.211765 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6cns8"] Nov 28 16:05:40 crc kubenswrapper[4884]: E1128 16:05:40.213324 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="extract-content" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.213360 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="extract-content" Nov 28 16:05:40 crc kubenswrapper[4884]: E1128 16:05:40.213409 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="registry-server" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.213426 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="registry-server" Nov 28 16:05:40 crc kubenswrapper[4884]: E1128 16:05:40.213520 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="extract-utilities" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.213539 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="extract-utilities" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.213895 4884 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a28e9e10-8083-4a09-b8da-a40b4c0ec864" containerName="registry-server" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.216318 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.226968 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6cns8"] Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.376025 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-catalog-content\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.376291 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-utilities\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.376364 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8vgl\" (UniqueName: \"kubernetes.io/projected/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-kube-api-access-g8vgl\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.477428 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-utilities\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.477499 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8vgl\" (UniqueName: \"kubernetes.io/projected/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-kube-api-access-g8vgl\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.477551 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-catalog-content\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.477978 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-utilities\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.478046 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-catalog-content\") pod \"community-operators-6cns8\" (UID: 
\"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.506369 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8vgl\" (UniqueName: \"kubernetes.io/projected/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-kube-api-access-g8vgl\") pod \"community-operators-6cns8\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:40 crc kubenswrapper[4884]: I1128 16:05:40.548274 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:41 crc kubenswrapper[4884]: I1128 16:05:41.100356 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6cns8"] Nov 28 16:05:41 crc kubenswrapper[4884]: I1128 16:05:41.137962 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6cns8" event={"ID":"0b0c893c-50db-430c-a66e-8a5bcd9a39b3","Type":"ContainerStarted","Data":"f2e9d7f184fc03d6702f35032a741f0694753dc2448f6d4ef4deab3bbaa054bf"} Nov 28 16:05:42 crc kubenswrapper[4884]: I1128 16:05:42.153878 4884 generic.go:334] "Generic (PLEG): container finished" podID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerID="701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7" exitCode=0 Nov 28 16:05:42 crc kubenswrapper[4884]: I1128 16:05:42.154149 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6cns8" event={"ID":"0b0c893c-50db-430c-a66e-8a5bcd9a39b3","Type":"ContainerDied","Data":"701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7"} Nov 28 16:05:42 crc kubenswrapper[4884]: I1128 16:05:42.157830 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:05:44 crc kubenswrapper[4884]: I1128 16:05:44.185865 4884 generic.go:334] "Generic (PLEG): container finished" podID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerID="a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3" exitCode=0 Nov 28 16:05:44 crc kubenswrapper[4884]: I1128 16:05:44.185931 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6cns8" event={"ID":"0b0c893c-50db-430c-a66e-8a5bcd9a39b3","Type":"ContainerDied","Data":"a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3"} Nov 28 16:05:46 crc kubenswrapper[4884]: I1128 16:05:46.209164 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6cns8" event={"ID":"0b0c893c-50db-430c-a66e-8a5bcd9a39b3","Type":"ContainerStarted","Data":"e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db"} Nov 28 16:05:46 crc kubenswrapper[4884]: I1128 16:05:46.244021 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6cns8" podStartSLOduration=3.378300739 podStartE2EDuration="6.243995737s" podCreationTimestamp="2025-11-28 16:05:40 +0000 UTC" firstStartedPulling="2025-11-28 16:05:42.157384767 +0000 UTC m=+2781.720168608" lastFinishedPulling="2025-11-28 16:05:45.023079795 +0000 UTC m=+2784.585863606" observedRunningTime="2025-11-28 16:05:46.238303008 +0000 UTC m=+2785.801086819" watchObservedRunningTime="2025-11-28 16:05:46.243995737 +0000 UTC m=+2785.806779568" Nov 28 16:05:50 crc kubenswrapper[4884]: I1128 16:05:50.548532 4884 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:50 crc kubenswrapper[4884]: I1128 16:05:50.549219 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:50 crc kubenswrapper[4884]: I1128 16:05:50.603613 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.242779 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.242836 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.242875 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.243324 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f99298659f1be858ec3b1dd152e125268ccd25702041569a13ed1b9ad071eab"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.243384 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://7f99298659f1be858ec3b1dd152e125268ccd25702041569a13ed1b9ad071eab" gracePeriod=600 Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.331151 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:51 crc kubenswrapper[4884]: I1128 16:05:51.393119 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6cns8"] Nov 28 16:05:52 crc kubenswrapper[4884]: I1128 16:05:52.267859 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="7f99298659f1be858ec3b1dd152e125268ccd25702041569a13ed1b9ad071eab" exitCode=0 Nov 28 16:05:52 crc kubenswrapper[4884]: I1128 16:05:52.267937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"7f99298659f1be858ec3b1dd152e125268ccd25702041569a13ed1b9ad071eab"} Nov 28 16:05:52 crc kubenswrapper[4884]: I1128 16:05:52.268002 4884 scope.go:117] "RemoveContainer" containerID="46beec55c1f17b08c96b3db56c1678c5d1c0b70ba1598a7d01ccb48646f08968" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.277730 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"} Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.278033 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6cns8" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="registry-server" containerID="cri-o://e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db" gracePeriod=2 Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.782123 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.875361 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8vgl\" (UniqueName: \"kubernetes.io/projected/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-kube-api-access-g8vgl\") pod \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.875612 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-utilities\") pod \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.875780 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-catalog-content\") pod \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\" (UID: \"0b0c893c-50db-430c-a66e-8a5bcd9a39b3\") " Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.876759 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-utilities" (OuterVolumeSpecName: "utilities") pod "0b0c893c-50db-430c-a66e-8a5bcd9a39b3" (UID: "0b0c893c-50db-430c-a66e-8a5bcd9a39b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.889800 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-kube-api-access-g8vgl" (OuterVolumeSpecName: "kube-api-access-g8vgl") pod "0b0c893c-50db-430c-a66e-8a5bcd9a39b3" (UID: "0b0c893c-50db-430c-a66e-8a5bcd9a39b3"). InnerVolumeSpecName "kube-api-access-g8vgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.940852 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b0c893c-50db-430c-a66e-8a5bcd9a39b3" (UID: "0b0c893c-50db-430c-a66e-8a5bcd9a39b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.976984 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8vgl\" (UniqueName: \"kubernetes.io/projected/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-kube-api-access-g8vgl\") on node \"crc\" DevicePath \"\"" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.977433 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:05:53 crc kubenswrapper[4884]: I1128 16:05:53.977445 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b0c893c-50db-430c-a66e-8a5bcd9a39b3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.294486 4884 generic.go:334] "Generic (PLEG): container finished" podID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerID="e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db" exitCode=0 Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.294651 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6cns8" Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.294600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6cns8" event={"ID":"0b0c893c-50db-430c-a66e-8a5bcd9a39b3","Type":"ContainerDied","Data":"e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db"} Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.294859 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6cns8" event={"ID":"0b0c893c-50db-430c-a66e-8a5bcd9a39b3","Type":"ContainerDied","Data":"f2e9d7f184fc03d6702f35032a741f0694753dc2448f6d4ef4deab3bbaa054bf"} Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.294886 4884 scope.go:117] "RemoveContainer" containerID="e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db" Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.319934 4884 scope.go:117] "RemoveContainer" containerID="a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3" Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.359782 4884 scope.go:117] "RemoveContainer" containerID="701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7" Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.363252 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6cns8"] Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.368876 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6cns8"] Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.386991 4884 scope.go:117] "RemoveContainer" containerID="e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db" Nov 28 16:05:54 crc kubenswrapper[4884]: E1128 16:05:54.387566 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db\": container with ID starting with e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db not found: ID does not exist" containerID="e5db34782d1b1f6dabca5989774e041786c225331e9a7c74b0db9e5a1de7c4db" Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.387632 
Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.387672 4884 scope.go:117] "RemoveContainer" containerID="a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3"
Nov 28 16:05:54 crc kubenswrapper[4884]: E1128 16:05:54.388085 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3\": container with ID starting with a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3 not found: ID does not exist" containerID="a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3"
Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.388227 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3"} err="failed to get container status \"a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3\": rpc error: code = NotFound desc = could not find container \"a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3\": container with ID starting with a91ab8a40cc11de6610e477c0631321c29ce9cccfdb6eb04b13141a0e688c1e3 not found: ID does not exist"
Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.388278 4884 scope.go:117] "RemoveContainer" containerID="701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7"
Nov 28 16:05:54 crc kubenswrapper[4884]: E1128 16:05:54.388940 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7\": container with ID starting with 701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7 not found: ID does not exist" containerID="701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7"
Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.388987 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7"} err="failed to get container status \"701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7\": rpc error: code = NotFound desc = could not find container \"701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7\": container with ID starting with 701c51ec965f36e5bc0c8963143ab9935091589a4c68aac22f9c64aa8a5d21e7 not found: ID does not exist"
Nov 28 16:05:54 crc kubenswrapper[4884]: I1128 16:05:54.699902 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" path="/var/lib/kubelet/pods/0b0c893c-50db-430c-a66e-8a5bcd9a39b3/volumes"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.618338 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bzwqs"]
Nov 28 16:05:59 crc kubenswrapper[4884]: E1128 16:05:59.619622 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="extract-content"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.619648 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="extract-content"
Nov 28 16:05:59 crc kubenswrapper[4884]: E1128 16:05:59.619684 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="registry-server"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.619697 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="registry-server"
Nov 28 16:05:59 crc kubenswrapper[4884]: E1128 16:05:59.619735 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="extract-utilities"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.619748 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="extract-utilities"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.620008 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b0c893c-50db-430c-a66e-8a5bcd9a39b3" containerName="registry-server"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.621896 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.641600 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzwqs"]
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.665021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-utilities\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.665263 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-catalog-content\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.665498 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn86c\" (UniqueName: \"kubernetes.io/projected/93e5978d-8637-44d1-83be-96adb47186e1-kube-api-access-vn86c\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.766562 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-catalog-content\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.766689 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn86c\" (UniqueName: \"kubernetes.io/projected/93e5978d-8637-44d1-83be-96adb47186e1-kube-api-access-vn86c\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.766729 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-utilities\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.767450 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-utilities\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.767471 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-catalog-content\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.805210 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn86c\" (UniqueName: \"kubernetes.io/projected/93e5978d-8637-44d1-83be-96adb47186e1-kube-api-access-vn86c\") pod \"redhat-operators-bzwqs\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") " pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:05:59 crc kubenswrapper[4884]: I1128 16:05:59.951879 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:00 crc kubenswrapper[4884]: I1128 16:06:00.198463 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzwqs"]
Nov 28 16:06:00 crc kubenswrapper[4884]: I1128 16:06:00.345951 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzwqs" event={"ID":"93e5978d-8637-44d1-83be-96adb47186e1","Type":"ContainerStarted","Data":"122f0efd304b9badaeb489598c63ea2ef71304e2a0de06290afffa5b9da2c4ee"}
Nov 28 16:06:01 crc kubenswrapper[4884]: I1128 16:06:01.360452 4884 generic.go:334] "Generic (PLEG): container finished" podID="93e5978d-8637-44d1-83be-96adb47186e1" containerID="4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2" exitCode=0
Nov 28 16:06:01 crc kubenswrapper[4884]: I1128 16:06:01.360537 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzwqs" event={"ID":"93e5978d-8637-44d1-83be-96adb47186e1","Type":"ContainerDied","Data":"4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2"}
Nov 28 16:06:03 crc kubenswrapper[4884]: I1128 16:06:03.379846 4884 generic.go:334] "Generic (PLEG): container finished" podID="93e5978d-8637-44d1-83be-96adb47186e1" containerID="714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7" exitCode=0
Nov 28 16:06:03 crc kubenswrapper[4884]: I1128 16:06:03.379911 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzwqs" event={"ID":"93e5978d-8637-44d1-83be-96adb47186e1","Type":"ContainerDied","Data":"714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7"}
Nov 28 16:06:04 crc kubenswrapper[4884]: I1128 16:06:04.397257 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzwqs" event={"ID":"93e5978d-8637-44d1-83be-96adb47186e1","Type":"ContainerStarted","Data":"f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5"}
Nov 28 16:06:04 crc kubenswrapper[4884]: I1128 16:06:04.422446 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bzwqs" podStartSLOduration=2.729712415 podStartE2EDuration="5.42242658s" podCreationTimestamp="2025-11-28 16:05:59 +0000 UTC" firstStartedPulling="2025-11-28 16:06:01.363906643 +0000 UTC m=+2800.926690484" lastFinishedPulling="2025-11-28 16:06:04.056620848 +0000 UTC m=+2803.619404649" observedRunningTime="2025-11-28 16:06:04.414683501 +0000 UTC m=+2803.977467312" watchObservedRunningTime="2025-11-28 16:06:04.42242658 +0000 UTC m=+2803.985210401"
Nov 28 16:06:09 crc kubenswrapper[4884]: I1128 16:06:09.952130 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:09 crc kubenswrapper[4884]: I1128 16:06:09.952490 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:10 crc kubenswrapper[4884]: I1128 16:06:10.016035 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:10 crc kubenswrapper[4884]: I1128 16:06:10.501306 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:11 crc kubenswrapper[4884]: I1128 16:06:11.254694 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzwqs"]
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.467158 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bzwqs" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="registry-server" containerID="cri-o://f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5" gracePeriod=2
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.835690 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.853886 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn86c\" (UniqueName: \"kubernetes.io/projected/93e5978d-8637-44d1-83be-96adb47186e1-kube-api-access-vn86c\") pod \"93e5978d-8637-44d1-83be-96adb47186e1\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") "
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.853941 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-utilities\") pod \"93e5978d-8637-44d1-83be-96adb47186e1\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") "
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.853988 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-catalog-content\") pod \"93e5978d-8637-44d1-83be-96adb47186e1\" (UID: \"93e5978d-8637-44d1-83be-96adb47186e1\") "
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.854967 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-utilities" (OuterVolumeSpecName: "utilities") pod "93e5978d-8637-44d1-83be-96adb47186e1" (UID: "93e5978d-8637-44d1-83be-96adb47186e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.861287 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93e5978d-8637-44d1-83be-96adb47186e1-kube-api-access-vn86c" (OuterVolumeSpecName: "kube-api-access-vn86c") pod "93e5978d-8637-44d1-83be-96adb47186e1" (UID: "93e5978d-8637-44d1-83be-96adb47186e1"). InnerVolumeSpecName "kube-api-access-vn86c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.955909 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn86c\" (UniqueName: \"kubernetes.io/projected/93e5978d-8637-44d1-83be-96adb47186e1-kube-api-access-vn86c\") on node \"crc\" DevicePath \"\""
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.955961 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:06:12 crc kubenswrapper[4884]: I1128 16:06:12.986639 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93e5978d-8637-44d1-83be-96adb47186e1" (UID: "93e5978d-8637-44d1-83be-96adb47186e1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.057356 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93e5978d-8637-44d1-83be-96adb47186e1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.479128 4884 generic.go:334] "Generic (PLEG): container finished" podID="93e5978d-8637-44d1-83be-96adb47186e1" containerID="f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5" exitCode=0
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.479186 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzwqs" event={"ID":"93e5978d-8637-44d1-83be-96adb47186e1","Type":"ContainerDied","Data":"f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5"}
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.479223 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzwqs" event={"ID":"93e5978d-8637-44d1-83be-96adb47186e1","Type":"ContainerDied","Data":"122f0efd304b9badaeb489598c63ea2ef71304e2a0de06290afffa5b9da2c4ee"}
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.479246 4884 scope.go:117] "RemoveContainer" containerID="f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.479282 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzwqs"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.503183 4884 scope.go:117] "RemoveContainer" containerID="714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.533860 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzwqs"]
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.534996 4884 scope.go:117] "RemoveContainer" containerID="4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.541028 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bzwqs"]
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.569144 4884 scope.go:117] "RemoveContainer" containerID="f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5"
Nov 28 16:06:13 crc kubenswrapper[4884]: E1128 16:06:13.569827 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5\": container with ID starting with f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5 not found: ID does not exist" containerID="f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.569881 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5"} err="failed to get container status \"f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5\": rpc error: code = NotFound desc = could not find container \"f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5\": container with ID starting with f32aeaf466a71c82ac8e5fafbe10e3a4a3df2d19dade423f3cea6d3f6cb2a2c5 not found: ID does not exist"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.569914 4884 scope.go:117] "RemoveContainer" containerID="714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7"
Nov 28 16:06:13 crc kubenswrapper[4884]: E1128 16:06:13.570320 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7\": container with ID starting with 714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7 not found: ID does not exist" containerID="714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.570360 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7"} err="failed to get container status \"714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7\": rpc error: code = NotFound desc = could not find container \"714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7\": container with ID starting with 714b90e6114a1e2f6f056c4c3cf46dbe9ca786e81d08c9c85d6c008e443884d7 not found: ID does not exist"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.570392 4884 scope.go:117] "RemoveContainer" containerID="4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2"
Nov 28 16:06:13 crc kubenswrapper[4884]: E1128 16:06:13.570719 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2\": container with ID starting with 4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2 not found: ID does not exist" containerID="4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2"
Nov 28 16:06:13 crc kubenswrapper[4884]: I1128 16:06:13.570870 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2"} err="failed to get container status \"4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2\": rpc error: code = NotFound desc = could not find container \"4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2\": container with ID starting with 4f81708d4d88067b9fb1e1882aa0f4cc3923f6f52f6a75e4a6ad87ed24aa1df2 not found: ID does not exist"
Nov 28 16:06:14 crc kubenswrapper[4884]: I1128 16:06:14.700561 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93e5978d-8637-44d1-83be-96adb47186e1" path="/var/lib/kubelet/pods/93e5978d-8637-44d1-83be-96adb47186e1/volumes"
Nov 28 16:08:21 crc kubenswrapper[4884]: I1128 16:08:21.242789 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:08:21 crc kubenswrapper[4884]: I1128 16:08:21.243473 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:08:51 crc kubenswrapper[4884]: I1128 16:08:51.242793 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:08:51 crc kubenswrapper[4884]: I1128 16:08:51.244377 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:09:21 crc kubenswrapper[4884]: I1128 16:09:21.243049 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:09:21 crc kubenswrapper[4884]: I1128 16:09:21.243802 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:09:21 crc kubenswrapper[4884]: I1128 16:09:21.243874 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 16:09:21 crc kubenswrapper[4884]: I1128 16:09:21.244976 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:09:21 crc kubenswrapper[4884]: I1128 16:09:21.245121 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325" gracePeriod=600
Nov 28 16:09:21 crc kubenswrapper[4884]: E1128 16:09:21.945069 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:09:22 crc kubenswrapper[4884]: I1128 16:09:22.407552 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325" exitCode=0
Nov 28 16:09:22 crc kubenswrapper[4884]: I1128 16:09:22.407671 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"}
Nov 28 16:09:22 crc kubenswrapper[4884]: I1128 16:09:22.407991 4884 scope.go:117] "RemoveContainer" containerID="7f99298659f1be858ec3b1dd152e125268ccd25702041569a13ed1b9ad071eab"
Nov 28 16:09:22 crc kubenswrapper[4884]: I1128 16:09:22.409059 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:09:22 crc kubenswrapper[4884]: E1128 16:09:22.409715 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:09:36 crc kubenswrapper[4884]: I1128 16:09:36.688381 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:09:36 crc kubenswrapper[4884]: E1128 16:09:36.689157 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:09:48 crc kubenswrapper[4884]: I1128 16:09:48.689075 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:09:48 crc kubenswrapper[4884]: E1128 16:09:48.690391 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:10:03 crc kubenswrapper[4884]: I1128 16:10:03.688754 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:10:03 crc kubenswrapper[4884]: E1128 16:10:03.689591 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:10:17 crc kubenswrapper[4884]: I1128 16:10:17.687789 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:10:17 crc kubenswrapper[4884]: E1128 16:10:17.688535 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:10:30 crc kubenswrapper[4884]: I1128 16:10:30.695357 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:10:30 crc kubenswrapper[4884]: E1128 16:10:30.696280 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:10:45 crc kubenswrapper[4884]: I1128 16:10:45.689311 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:10:45 crc kubenswrapper[4884]: E1128 16:10:45.690206 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:10:58 crc kubenswrapper[4884]: I1128 16:10:58.688193 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:10:58 crc kubenswrapper[4884]: E1128 16:10:58.688937 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:11:13 crc kubenswrapper[4884]: I1128 16:11:13.688566 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:11:13 crc kubenswrapper[4884]: E1128 16:11:13.690009 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:11:25 crc kubenswrapper[4884]: I1128 16:11:25.689246 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:11:25 crc kubenswrapper[4884]: E1128 16:11:25.690067 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:11:39 crc kubenswrapper[4884]: I1128 16:11:39.689141 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:11:39 crc kubenswrapper[4884]: E1128 16:11:39.690249 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:11:52 crc kubenswrapper[4884]: I1128 16:11:52.688898 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:11:52 crc kubenswrapper[4884]: E1128 16:11:52.689908 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:12:03 crc kubenswrapper[4884]: I1128 16:12:03.688541 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:12:03 crc kubenswrapper[4884]: E1128 16:12:03.689487 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:12:18 crc kubenswrapper[4884]: I1128 16:12:18.689307 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:12:18 crc kubenswrapper[4884]: E1128 16:12:18.690269 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:12:31 crc kubenswrapper[4884]: I1128 16:12:31.689287 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:12:31 crc kubenswrapper[4884]: E1128 16:12:31.690135 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:12:42 crc kubenswrapper[4884]: I1128 16:12:42.688893 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:12:42 crc kubenswrapper[4884]: E1128 16:12:42.689607 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.046981 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d8rvb"]
Nov 28 16:12:43 crc kubenswrapper[4884]: E1128 16:12:43.047521 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="registry-server"
Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.047550 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="registry-server"
Nov 28 16:12:43 crc kubenswrapper[4884]: E1128 16:12:43.047586 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="extract-content"
Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.047599 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="extract-content"
Nov 28 16:12:43 crc kubenswrapper[4884]: E1128 16:12:43.047626 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="extract-utilities"
Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.047641 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="extract-utilities"
Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.047936 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="93e5978d-8637-44d1-83be-96adb47186e1" containerName="registry-server"
Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.049674 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.064753 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8rvb"] Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.111488 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-utilities\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.111576 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-catalog-content\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.111625 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4zzt\" (UniqueName: \"kubernetes.io/projected/cfbce0e4-7684-49de-a534-84df90669320-kube-api-access-d4zzt\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.212614 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-utilities\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.212714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-catalog-content\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.212758 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4zzt\" (UniqueName: \"kubernetes.io/projected/cfbce0e4-7684-49de-a534-84df90669320-kube-api-access-d4zzt\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.213357 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-catalog-content\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.213494 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-utilities\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.231886 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-d4zzt\" (UniqueName: \"kubernetes.io/projected/cfbce0e4-7684-49de-a534-84df90669320-kube-api-access-d4zzt\") pod \"redhat-marketplace-d8rvb\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") " pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.371379 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8rvb" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.659444 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lp9dw"] Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.661932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.676417 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lp9dw"] Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.828169 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6mtf\" (UniqueName: \"kubernetes.io/projected/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-kube-api-access-g6mtf\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.828251 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-utilities\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.828314 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-catalog-content\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.864807 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8rvb"] Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.929937 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6mtf\" (UniqueName: \"kubernetes.io/projected/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-kube-api-access-g6mtf\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.930004 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-utilities\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.930035 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-catalog-content\") pod \"certified-operators-lp9dw\" (UID: 
\"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.930549 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-catalog-content\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.931071 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-utilities\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.948634 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6mtf\" (UniqueName: \"kubernetes.io/projected/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-kube-api-access-g6mtf\") pod \"certified-operators-lp9dw\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") " pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:43 crc kubenswrapper[4884]: I1128 16:12:43.998676 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lp9dw" Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.291258 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lp9dw"] Nov 28 16:12:44 crc kubenswrapper[4884]: W1128 16:12:44.298646 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0cb7c9c_a6ec_4878_b9bb_2e67ccd1bafa.slice/crio-7d7fead8681cc89065467782707d93cf5489804ea46e4b31beb428dac5027910 WatchSource:0}: Error finding container 7d7fead8681cc89065467782707d93cf5489804ea46e4b31beb428dac5027910: Status 404 returned error can't find the container with id 7d7fead8681cc89065467782707d93cf5489804ea46e4b31beb428dac5027910 Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.513668 4884 generic.go:334] "Generic (PLEG): container finished" podID="cfbce0e4-7684-49de-a534-84df90669320" containerID="1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351" exitCode=0 Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.513720 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8rvb" event={"ID":"cfbce0e4-7684-49de-a534-84df90669320","Type":"ContainerDied","Data":"1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351"} Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.513942 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8rvb" event={"ID":"cfbce0e4-7684-49de-a534-84df90669320","Type":"ContainerStarted","Data":"ae927dfcc4a99034d3405b9629175c1faa81144a9752854a65a68a402a4c6ec8"} Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.515333 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.515748 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerID="2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc" exitCode=0 Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.515779 
4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lp9dw" event={"ID":"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa","Type":"ContainerDied","Data":"2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc"} Nov 28 16:12:44 crc kubenswrapper[4884]: I1128 16:12:44.515802 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lp9dw" event={"ID":"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa","Type":"ContainerStarted","Data":"7d7fead8681cc89065467782707d93cf5489804ea46e4b31beb428dac5027910"} Nov 28 16:12:46 crc kubenswrapper[4884]: I1128 16:12:46.537418 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerID="84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c" exitCode=0 Nov 28 16:12:46 crc kubenswrapper[4884]: I1128 16:12:46.537469 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lp9dw" event={"ID":"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa","Type":"ContainerDied","Data":"84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c"} Nov 28 16:12:46 crc kubenswrapper[4884]: I1128 16:12:46.540075 4884 generic.go:334] "Generic (PLEG): container finished" podID="cfbce0e4-7684-49de-a534-84df90669320" containerID="0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf" exitCode=0 Nov 28 16:12:46 crc kubenswrapper[4884]: I1128 16:12:46.540159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8rvb" event={"ID":"cfbce0e4-7684-49de-a534-84df90669320","Type":"ContainerDied","Data":"0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf"} Nov 28 16:12:47 crc kubenswrapper[4884]: I1128 16:12:47.552289 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lp9dw" event={"ID":"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa","Type":"ContainerStarted","Data":"65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3"} Nov 28 16:12:47 crc kubenswrapper[4884]: I1128 16:12:47.556219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8rvb" event={"ID":"cfbce0e4-7684-49de-a534-84df90669320","Type":"ContainerStarted","Data":"c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293"} Nov 28 16:12:47 crc kubenswrapper[4884]: I1128 16:12:47.591340 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lp9dw" podStartSLOduration=1.9185186220000001 podStartE2EDuration="4.591306894s" podCreationTimestamp="2025-11-28 16:12:43 +0000 UTC" firstStartedPulling="2025-11-28 16:12:44.517385536 +0000 UTC m=+3204.080169337" lastFinishedPulling="2025-11-28 16:12:47.190173768 +0000 UTC m=+3206.752957609" observedRunningTime="2025-11-28 16:12:47.577900537 +0000 UTC m=+3207.140684408" watchObservedRunningTime="2025-11-28 16:12:47.591306894 +0000 UTC m=+3207.154090735" Nov 28 16:12:47 crc kubenswrapper[4884]: I1128 16:12:47.610878 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d8rvb" podStartSLOduration=2.162111357 podStartE2EDuration="4.610854812s" podCreationTimestamp="2025-11-28 16:12:43 +0000 UTC" firstStartedPulling="2025-11-28 16:12:44.51511762 +0000 UTC m=+3204.077901421" lastFinishedPulling="2025-11-28 16:12:46.963861035 +0000 UTC m=+3206.526644876" observedRunningTime="2025-11-28 16:12:47.609840438 +0000 UTC 
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.371929 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.372684 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.455302 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.673023 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.739615 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8rvb"]
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.999179 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lp9dw"
Nov 28 16:12:53 crc kubenswrapper[4884]: I1128 16:12:53.999267 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lp9dw"
Nov 28 16:12:54 crc kubenswrapper[4884]: I1128 16:12:54.058014 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lp9dw"
Nov 28 16:12:54 crc kubenswrapper[4884]: I1128 16:12:54.719243 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lp9dw"
Nov 28 16:12:55 crc kubenswrapper[4884]: I1128 16:12:55.650298 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d8rvb" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="registry-server" containerID="cri-o://c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293" gracePeriod=2
Nov 28 16:12:56 crc kubenswrapper[4884]: I1128 16:12:56.104169 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lp9dw"]
Nov 28 16:12:56 crc kubenswrapper[4884]: I1128 16:12:56.656826 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lp9dw" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="registry-server" containerID="cri-o://65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3" gracePeriod=2
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.131558 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.242644 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-utilities\") pod \"cfbce0e4-7684-49de-a534-84df90669320\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") "
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.242722 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4zzt\" (UniqueName: \"kubernetes.io/projected/cfbce0e4-7684-49de-a534-84df90669320-kube-api-access-d4zzt\") pod \"cfbce0e4-7684-49de-a534-84df90669320\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") "
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.242756 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-catalog-content\") pod \"cfbce0e4-7684-49de-a534-84df90669320\" (UID: \"cfbce0e4-7684-49de-a534-84df90669320\") "
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.243937 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-utilities" (OuterVolumeSpecName: "utilities") pod "cfbce0e4-7684-49de-a534-84df90669320" (UID: "cfbce0e4-7684-49de-a534-84df90669320"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.248184 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbce0e4-7684-49de-a534-84df90669320-kube-api-access-d4zzt" (OuterVolumeSpecName: "kube-api-access-d4zzt") pod "cfbce0e4-7684-49de-a534-84df90669320" (UID: "cfbce0e4-7684-49de-a534-84df90669320"). InnerVolumeSpecName "kube-api-access-d4zzt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.269654 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cfbce0e4-7684-49de-a534-84df90669320" (UID: "cfbce0e4-7684-49de-a534-84df90669320"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.344280 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.344321 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4zzt\" (UniqueName: \"kubernetes.io/projected/cfbce0e4-7684-49de-a534-84df90669320-kube-api-access-d4zzt\") on node \"crc\" DevicePath \"\""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.344336 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cfbce0e4-7684-49de-a534-84df90669320-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.515234 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lp9dw"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.649798 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-utilities\") pod \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") "
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.649841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6mtf\" (UniqueName: \"kubernetes.io/projected/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-kube-api-access-g6mtf\") pod \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") "
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.649970 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-catalog-content\") pod \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\" (UID: \"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa\") "
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.650884 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-utilities" (OuterVolumeSpecName: "utilities") pod "c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" (UID: "c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.655942 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-kube-api-access-g6mtf" (OuterVolumeSpecName: "kube-api-access-g6mtf") pod "c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" (UID: "c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa"). InnerVolumeSpecName "kube-api-access-g6mtf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.664845 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerID="65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3" exitCode=0
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.664938 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lp9dw"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.664945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lp9dw" event={"ID":"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa","Type":"ContainerDied","Data":"65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3"}
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.665059 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lp9dw" event={"ID":"c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa","Type":"ContainerDied","Data":"7d7fead8681cc89065467782707d93cf5489804ea46e4b31beb428dac5027910"}
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.665081 4884 scope.go:117] "RemoveContainer" containerID="65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.667778 4884 generic.go:334] "Generic (PLEG): container finished" podID="cfbce0e4-7684-49de-a534-84df90669320" containerID="c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293" exitCode=0
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.667818 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8rvb" event={"ID":"cfbce0e4-7684-49de-a534-84df90669320","Type":"ContainerDied","Data":"c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293"}
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.667846 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d8rvb" event={"ID":"cfbce0e4-7684-49de-a534-84df90669320","Type":"ContainerDied","Data":"ae927dfcc4a99034d3405b9629175c1faa81144a9752854a65a68a402a4c6ec8"}
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.667899 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d8rvb"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.686299 4884 scope.go:117] "RemoveContainer" containerID="84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.687933 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.688275 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.709608 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8rvb"]
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.712421 4884 scope.go:117] "RemoveContainer" containerID="2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.713906 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" (UID: "c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.717496 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d8rvb"]
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.732878 4884 scope.go:117] "RemoveContainer" containerID="65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.733328 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3\": container with ID starting with 65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3 not found: ID does not exist" containerID="65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.733368 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3"} err="failed to get container status \"65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3\": rpc error: code = NotFound desc = could not find container \"65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3\": container with ID starting with 65cebee61827c90f08c5780cd3c2c8ae0ea0f7edb3c9561272f5cb5554d9a0d3 not found: ID does not exist"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.733403 4884 scope.go:117] "RemoveContainer" containerID="84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.733839 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c\": container with ID starting with 84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c not found: ID does not exist" containerID="84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.733868 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c"} err="failed to get container status \"84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c\": rpc error: code = NotFound desc = could not find container \"84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c\": container with ID starting with 84ef03ab8fca5d0d2404cfdc138075243b029cf9b6b94ff19a36d3ace9f9776c not found: ID does not exist"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.733912 4884 scope.go:117] "RemoveContainer" containerID="2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.734157 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc\": container with ID starting with 2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc not found: ID does not exist" containerID="2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.734200 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc"} err="failed to get container status \"2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc\": rpc error: code = NotFound desc = could not find container \"2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc\": container with ID starting with 2b96f147cba9c151408c14120e79d4d38d77b0b026b17791ac8d94a8b1e9a8bc not found: ID does not exist"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.734227 4884 scope.go:117] "RemoveContainer" containerID="c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.751506 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.751532 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6mtf\" (UniqueName: \"kubernetes.io/projected/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-kube-api-access-g6mtf\") on node \"crc\" DevicePath \"\""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.751540 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.755385 4884 scope.go:117] "RemoveContainer" containerID="0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.808735 4884 scope.go:117] "RemoveContainer" containerID="1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.840470 4884 scope.go:117] "RemoveContainer" containerID="c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.840950 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293\": container with ID starting with c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293 not found: ID does not exist" containerID="c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.840994 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293"} err="failed to get container status \"c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293\": rpc error: code = NotFound desc = could not find container \"c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293\": container with ID starting with c613e36c23648d886a7bc7d45bebff420d2ecd2f02c478707b99c3a6e405d293 not found: ID does not exist"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.841024 4884 scope.go:117] "RemoveContainer" containerID="0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.841633 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf\": container with ID starting with 0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf not found: ID does not exist" containerID="0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.841666 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf"} err="failed to get container status \"0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf\": rpc error: code = NotFound desc = could not find container \"0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf\": container with ID starting with 0f4241e96493963c874edb1cc57c3243c09ed8f5c934a46a0fdf21e8025390bf not found: ID does not exist"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.841717 4884 scope.go:117] "RemoveContainer" containerID="1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351"
Nov 28 16:12:57 crc kubenswrapper[4884]: E1128 16:12:57.842125 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351\": container with ID starting with 1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351 not found: ID does not exist" containerID="1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351"
Nov 28 16:12:57 crc kubenswrapper[4884]: I1128 16:12:57.842157 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351"} err="failed to get container status \"1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351\": rpc error: code = NotFound desc = could not find container \"1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351\": container with ID starting with 1967f1000ebab7370ccbfb9b7219f0ff29de6d830f4c0b049ec261005d369351 not found: ID does not exist"
Nov 28 16:12:58 crc kubenswrapper[4884]: I1128 16:12:58.016411 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lp9dw"]
Nov 28 16:12:58 crc kubenswrapper[4884]: I1128 16:12:58.030862 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lp9dw"]
Nov 28 16:12:58 crc kubenswrapper[4884]: I1128 16:12:58.706416 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" path="/var/lib/kubelet/pods/c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa/volumes"
Nov 28 16:12:58 crc kubenswrapper[4884]: I1128 16:12:58.707865 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfbce0e4-7684-49de-a534-84df90669320" path="/var/lib/kubelet/pods/cfbce0e4-7684-49de-a534-84df90669320/volumes"
Nov 28 16:13:09 crc kubenswrapper[4884]: I1128 16:13:09.688844 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:13:09 crc kubenswrapper[4884]: E1128 16:13:09.691243 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:13:24 crc kubenswrapper[4884]: I1128 16:13:24.689238 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:13:24 crc kubenswrapper[4884]: E1128 16:13:24.689732 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:13:39 crc kubenswrapper[4884]: I1128 16:13:39.688550 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:13:39 crc kubenswrapper[4884]: E1128 16:13:39.689330 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:13:50 crc kubenswrapper[4884]: I1128 16:13:50.696971 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:13:50 crc kubenswrapper[4884]: E1128 16:13:50.698027 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:14:04 crc kubenswrapper[4884]: I1128 16:14:04.689227 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:14:04 crc kubenswrapper[4884]: E1128 16:14:04.690251 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:14:19 crc kubenswrapper[4884]: I1128 16:14:19.688873 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:14:19 crc kubenswrapper[4884]: E1128 16:14:19.689670 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:14:33 crc kubenswrapper[4884]: I1128 16:14:33.688648 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:14:34 crc kubenswrapper[4884]: I1128 16:14:34.468074 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"ec8a4697997e375bd054ae12a2b413d8a880cda9660a715ce0fe194d42ebc6eb"}
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.142015 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"]
Nov 28 16:15:00 crc kubenswrapper[4884]: E1128 16:15:00.145832 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="extract-content"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.145944 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="extract-content"
Nov 28 16:15:00 crc kubenswrapper[4884]: E1128 16:15:00.146239 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="extract-utilities"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.146364 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="extract-utilities"
Nov 28 16:15:00 crc kubenswrapper[4884]: E1128 16:15:00.146545 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="registry-server"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.146682 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="registry-server"
Nov 28 16:15:00 crc kubenswrapper[4884]: E1128 16:15:00.146831 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="extract-content"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.146956 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="extract-content"
Nov 28 16:15:00 crc kubenswrapper[4884]: E1128 16:15:00.147062 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="extract-utilities"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.147231 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="extract-utilities"
Nov 28 16:15:00 crc kubenswrapper[4884]: E1128 16:15:00.147343 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="registry-server"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.147442 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="registry-server"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.148633 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfbce0e4-7684-49de-a534-84df90669320" containerName="registry-server"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.148783 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0cb7c9c-a6ec-4878-b9bb-2e67ccd1bafa" containerName="registry-server"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.150983 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.154744 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.154907 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.156562 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"]
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.221265 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a3498621-3413-4663-9414-4190355ab301-config-volume\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.221336 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6vxh\" (UniqueName: \"kubernetes.io/projected/a3498621-3413-4663-9414-4190355ab301-kube-api-access-v6vxh\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.221445 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a3498621-3413-4663-9414-4190355ab301-secret-volume\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.322739 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a3498621-3413-4663-9414-4190355ab301-secret-volume\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.322847 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a3498621-3413-4663-9414-4190355ab301-config-volume\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.322888 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6vxh\" (UniqueName: \"kubernetes.io/projected/a3498621-3413-4663-9414-4190355ab301-kube-api-access-v6vxh\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.323813 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a3498621-3413-4663-9414-4190355ab301-config-volume\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"
\"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.331786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a3498621-3413-4663-9414-4190355ab301-secret-volume\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.340611 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6vxh\" (UniqueName: \"kubernetes.io/projected/a3498621-3413-4663-9414-4190355ab301-kube-api-access-v6vxh\") pod \"collect-profiles-29405775-ncld8\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.484006 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" Nov 28 16:15:00 crc kubenswrapper[4884]: I1128 16:15:00.909498 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"] Nov 28 16:15:01 crc kubenswrapper[4884]: I1128 16:15:01.702322 4884 generic.go:334] "Generic (PLEG): container finished" podID="a3498621-3413-4663-9414-4190355ab301" containerID="492799a792c6f741b01657a75d3865db8d1fbbc5aef6adb1a1124fda2323cc3e" exitCode=0 Nov 28 16:15:01 crc kubenswrapper[4884]: I1128 16:15:01.702413 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" event={"ID":"a3498621-3413-4663-9414-4190355ab301","Type":"ContainerDied","Data":"492799a792c6f741b01657a75d3865db8d1fbbc5aef6adb1a1124fda2323cc3e"} Nov 28 16:15:01 crc kubenswrapper[4884]: I1128 16:15:01.702703 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" event={"ID":"a3498621-3413-4663-9414-4190355ab301","Type":"ContainerStarted","Data":"51a3be80c1bc2ad1b35098cbbc2f10e4e2d3d659c9b9b120db37236193fc7061"} Nov 28 16:15:02 crc kubenswrapper[4884]: I1128 16:15:02.987965 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.070739 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6vxh\" (UniqueName: \"kubernetes.io/projected/a3498621-3413-4663-9414-4190355ab301-kube-api-access-v6vxh\") pod \"a3498621-3413-4663-9414-4190355ab301\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.070895 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a3498621-3413-4663-9414-4190355ab301-config-volume\") pod \"a3498621-3413-4663-9414-4190355ab301\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.070926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a3498621-3413-4663-9414-4190355ab301-secret-volume\") pod \"a3498621-3413-4663-9414-4190355ab301\" (UID: \"a3498621-3413-4663-9414-4190355ab301\") " Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.071446 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3498621-3413-4663-9414-4190355ab301-config-volume" (OuterVolumeSpecName: "config-volume") pod "a3498621-3413-4663-9414-4190355ab301" (UID: "a3498621-3413-4663-9414-4190355ab301"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.076645 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3498621-3413-4663-9414-4190355ab301-kube-api-access-v6vxh" (OuterVolumeSpecName: "kube-api-access-v6vxh") pod "a3498621-3413-4663-9414-4190355ab301" (UID: "a3498621-3413-4663-9414-4190355ab301"). InnerVolumeSpecName "kube-api-access-v6vxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.077703 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3498621-3413-4663-9414-4190355ab301-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a3498621-3413-4663-9414-4190355ab301" (UID: "a3498621-3413-4663-9414-4190355ab301"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.172943 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a3498621-3413-4663-9414-4190355ab301-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.173031 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a3498621-3413-4663-9414-4190355ab301-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.173057 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6vxh\" (UniqueName: \"kubernetes.io/projected/a3498621-3413-4663-9414-4190355ab301-kube-api-access-v6vxh\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.718294 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" event={"ID":"a3498621-3413-4663-9414-4190355ab301","Type":"ContainerDied","Data":"51a3be80c1bc2ad1b35098cbbc2f10e4e2d3d659c9b9b120db37236193fc7061"} Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.718339 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51a3be80c1bc2ad1b35098cbbc2f10e4e2d3d659c9b9b120db37236193fc7061" Nov 28 16:15:03 crc kubenswrapper[4884]: I1128 16:15:03.718352 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8" Nov 28 16:15:04 crc kubenswrapper[4884]: I1128 16:15:04.055473 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn"] Nov 28 16:15:04 crc kubenswrapper[4884]: I1128 16:15:04.061356 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-rpdfn"] Nov 28 16:15:04 crc kubenswrapper[4884]: I1128 16:15:04.703537 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="256c7539-5f02-4f8f-9059-63fff2c34910" path="/var/lib/kubelet/pods/256c7539-5f02-4f8f-9059-63fff2c34910/volumes" Nov 28 16:15:28 crc kubenswrapper[4884]: I1128 16:15:28.448360 4884 scope.go:117] "RemoveContainer" containerID="8da1323b20d1a6ca4604d53fca58074534c6a8fe439d7f9ba16464983a1c9b38" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.031751 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d79sj"] Nov 28 16:16:09 crc kubenswrapper[4884]: E1128 16:16:09.032696 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3498621-3413-4663-9414-4190355ab301" containerName="collect-profiles" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.032713 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3498621-3413-4663-9414-4190355ab301" containerName="collect-profiles" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.032884 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3498621-3413-4663-9414-4190355ab301" containerName="collect-profiles" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.034050 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.046939 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d79sj"] Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.191638 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8kl2\" (UniqueName: \"kubernetes.io/projected/baa7ccf3-49f7-4276-a72b-3f2b06878009-kube-api-access-t8kl2\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.192178 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-utilities\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.192303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-catalog-content\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.293912 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-catalog-content\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.294046 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8kl2\" (UniqueName: \"kubernetes.io/projected/baa7ccf3-49f7-4276-a72b-3f2b06878009-kube-api-access-t8kl2\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.294080 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-utilities\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.294520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-catalog-content\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.294683 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-utilities\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.313696 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t8kl2\" (UniqueName: \"kubernetes.io/projected/baa7ccf3-49f7-4276-a72b-3f2b06878009-kube-api-access-t8kl2\") pod \"community-operators-d79sj\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.357366 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:09 crc kubenswrapper[4884]: I1128 16:16:09.877479 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d79sj"] Nov 28 16:16:09 crc kubenswrapper[4884]: W1128 16:16:09.877987 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbaa7ccf3_49f7_4276_a72b_3f2b06878009.slice/crio-6c06458670dbb98056f97fcfb2035f28b45a80411ac27cda587abb7a5ed69d6a WatchSource:0}: Error finding container 6c06458670dbb98056f97fcfb2035f28b45a80411ac27cda587abb7a5ed69d6a: Status 404 returned error can't find the container with id 6c06458670dbb98056f97fcfb2035f28b45a80411ac27cda587abb7a5ed69d6a Nov 28 16:16:10 crc kubenswrapper[4884]: I1128 16:16:10.300707 4884 generic.go:334] "Generic (PLEG): container finished" podID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerID="879e557886a9cb60be2201730302cf645d9d7573cf08c31aedf06d39e518b51b" exitCode=0 Nov 28 16:16:10 crc kubenswrapper[4884]: I1128 16:16:10.300776 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d79sj" event={"ID":"baa7ccf3-49f7-4276-a72b-3f2b06878009","Type":"ContainerDied","Data":"879e557886a9cb60be2201730302cf645d9d7573cf08c31aedf06d39e518b51b"} Nov 28 16:16:10 crc kubenswrapper[4884]: I1128 16:16:10.300806 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d79sj" event={"ID":"baa7ccf3-49f7-4276-a72b-3f2b06878009","Type":"ContainerStarted","Data":"6c06458670dbb98056f97fcfb2035f28b45a80411ac27cda587abb7a5ed69d6a"} Nov 28 16:16:12 crc kubenswrapper[4884]: I1128 16:16:12.320448 4884 generic.go:334] "Generic (PLEG): container finished" podID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerID="6ed8b3e8d460fb01c28dfa129a18ee4806daf9675fbb3c2766f5a314d7463517" exitCode=0 Nov 28 16:16:12 crc kubenswrapper[4884]: I1128 16:16:12.320560 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d79sj" event={"ID":"baa7ccf3-49f7-4276-a72b-3f2b06878009","Type":"ContainerDied","Data":"6ed8b3e8d460fb01c28dfa129a18ee4806daf9675fbb3c2766f5a314d7463517"} Nov 28 16:16:13 crc kubenswrapper[4884]: I1128 16:16:13.350200 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d79sj" event={"ID":"baa7ccf3-49f7-4276-a72b-3f2b06878009","Type":"ContainerStarted","Data":"987b93c0a4b8814d11c4877f02328a89dadd974b76a53850eab0bc5899ed6c90"} Nov 28 16:16:13 crc kubenswrapper[4884]: I1128 16:16:13.373380 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d79sj" podStartSLOduration=1.886755578 podStartE2EDuration="4.373357159s" podCreationTimestamp="2025-11-28 16:16:09 +0000 UTC" firstStartedPulling="2025-11-28 16:16:10.302673269 +0000 UTC m=+3409.865457110" lastFinishedPulling="2025-11-28 16:16:12.78927487 +0000 UTC m=+3412.352058691" observedRunningTime="2025-11-28 16:16:13.370259624 +0000 UTC 
m=+3412.933043415" watchObservedRunningTime="2025-11-28 16:16:13.373357159 +0000 UTC m=+3412.936140960" Nov 28 16:16:19 crc kubenswrapper[4884]: I1128 16:16:19.358511 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:19 crc kubenswrapper[4884]: I1128 16:16:19.359139 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:19 crc kubenswrapper[4884]: I1128 16:16:19.429646 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:19 crc kubenswrapper[4884]: I1128 16:16:19.500394 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:19 crc kubenswrapper[4884]: I1128 16:16:19.675420 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d79sj"] Nov 28 16:16:21 crc kubenswrapper[4884]: I1128 16:16:21.413538 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d79sj" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="registry-server" containerID="cri-o://987b93c0a4b8814d11c4877f02328a89dadd974b76a53850eab0bc5899ed6c90" gracePeriod=2 Nov 28 16:16:22 crc kubenswrapper[4884]: I1128 16:16:22.424162 4884 generic.go:334] "Generic (PLEG): container finished" podID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerID="987b93c0a4b8814d11c4877f02328a89dadd974b76a53850eab0bc5899ed6c90" exitCode=0 Nov 28 16:16:22 crc kubenswrapper[4884]: I1128 16:16:22.424208 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d79sj" event={"ID":"baa7ccf3-49f7-4276-a72b-3f2b06878009","Type":"ContainerDied","Data":"987b93c0a4b8814d11c4877f02328a89dadd974b76a53850eab0bc5899ed6c90"} Nov 28 16:16:22 crc kubenswrapper[4884]: I1128 16:16:22.991489 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.015037 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8kl2\" (UniqueName: \"kubernetes.io/projected/baa7ccf3-49f7-4276-a72b-3f2b06878009-kube-api-access-t8kl2\") pod \"baa7ccf3-49f7-4276-a72b-3f2b06878009\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.016581 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-utilities\") pod \"baa7ccf3-49f7-4276-a72b-3f2b06878009\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.016628 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-catalog-content\") pod \"baa7ccf3-49f7-4276-a72b-3f2b06878009\" (UID: \"baa7ccf3-49f7-4276-a72b-3f2b06878009\") " Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.018117 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-utilities" (OuterVolumeSpecName: "utilities") pod "baa7ccf3-49f7-4276-a72b-3f2b06878009" (UID: "baa7ccf3-49f7-4276-a72b-3f2b06878009"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.024355 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baa7ccf3-49f7-4276-a72b-3f2b06878009-kube-api-access-t8kl2" (OuterVolumeSpecName: "kube-api-access-t8kl2") pod "baa7ccf3-49f7-4276-a72b-3f2b06878009" (UID: "baa7ccf3-49f7-4276-a72b-3f2b06878009"). InnerVolumeSpecName "kube-api-access-t8kl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.091027 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "baa7ccf3-49f7-4276-a72b-3f2b06878009" (UID: "baa7ccf3-49f7-4276-a72b-3f2b06878009"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.118801 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8kl2\" (UniqueName: \"kubernetes.io/projected/baa7ccf3-49f7-4276-a72b-3f2b06878009-kube-api-access-t8kl2\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.118889 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.118904 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baa7ccf3-49f7-4276-a72b-3f2b06878009-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.433934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d79sj" event={"ID":"baa7ccf3-49f7-4276-a72b-3f2b06878009","Type":"ContainerDied","Data":"6c06458670dbb98056f97fcfb2035f28b45a80411ac27cda587abb7a5ed69d6a"} Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.434025 4884 scope.go:117] "RemoveContainer" containerID="987b93c0a4b8814d11c4877f02328a89dadd974b76a53850eab0bc5899ed6c90" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.434050 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d79sj" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.452509 4884 scope.go:117] "RemoveContainer" containerID="6ed8b3e8d460fb01c28dfa129a18ee4806daf9675fbb3c2766f5a314d7463517" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.481498 4884 scope.go:117] "RemoveContainer" containerID="879e557886a9cb60be2201730302cf645d9d7573cf08c31aedf06d39e518b51b" Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.481645 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d79sj"] Nov 28 16:16:23 crc kubenswrapper[4884]: I1128 16:16:23.489119 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d79sj"] Nov 28 16:16:24 crc kubenswrapper[4884]: I1128 16:16:24.707243 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" path="/var/lib/kubelet/pods/baa7ccf3-49f7-4276-a72b-3f2b06878009/volumes" Nov 28 16:16:51 crc kubenswrapper[4884]: I1128 16:16:51.243452 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:16:51 crc kubenswrapper[4884]: I1128 16:16:51.244464 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.961307 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t4j9q"] Nov 28 16:16:54 crc kubenswrapper[4884]: E1128 16:16:54.961882 4884 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="registry-server" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.961895 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="registry-server" Nov 28 16:16:54 crc kubenswrapper[4884]: E1128 16:16:54.961923 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="extract-content" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.961929 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="extract-content" Nov 28 16:16:54 crc kubenswrapper[4884]: E1128 16:16:54.961943 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="extract-utilities" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.961950 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="extract-utilities" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.962316 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="baa7ccf3-49f7-4276-a72b-3f2b06878009" containerName="registry-server" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.963259 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:54 crc kubenswrapper[4884]: I1128 16:16:54.982676 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t4j9q"] Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.082461 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-catalog-content\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.082522 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vrgx\" (UniqueName: \"kubernetes.io/projected/41124959-14f0-4d3a-b0e5-213a0940fc54-kube-api-access-4vrgx\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.082548 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-utilities\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.184444 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-catalog-content\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.184563 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vrgx\" (UniqueName: \"kubernetes.io/projected/41124959-14f0-4d3a-b0e5-213a0940fc54-kube-api-access-4vrgx\") pod 
\"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.184611 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-utilities\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.184993 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-catalog-content\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.185462 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-utilities\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.211221 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vrgx\" (UniqueName: \"kubernetes.io/projected/41124959-14f0-4d3a-b0e5-213a0940fc54-kube-api-access-4vrgx\") pod \"redhat-operators-t4j9q\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.281708 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:16:55 crc kubenswrapper[4884]: I1128 16:16:55.731137 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t4j9q"] Nov 28 16:16:56 crc kubenswrapper[4884]: I1128 16:16:56.727790 4884 generic.go:334] "Generic (PLEG): container finished" podID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerID="d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27" exitCode=0 Nov 28 16:16:56 crc kubenswrapper[4884]: I1128 16:16:56.727831 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerDied","Data":"d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27"} Nov 28 16:16:56 crc kubenswrapper[4884]: I1128 16:16:56.727853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerStarted","Data":"c0e02364243a199d592672178bbf457c0b5b37b19a7ec992fb11bb311eb5c781"} Nov 28 16:16:57 crc kubenswrapper[4884]: I1128 16:16:57.741700 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerStarted","Data":"1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3"} Nov 28 16:16:58 crc kubenswrapper[4884]: I1128 16:16:58.754631 4884 generic.go:334] "Generic (PLEG): container finished" podID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerID="1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3" exitCode=0 Nov 28 16:16:58 crc kubenswrapper[4884]: I1128 16:16:58.754680 
4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerDied","Data":"1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3"} Nov 28 16:16:59 crc kubenswrapper[4884]: I1128 16:16:59.766821 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerStarted","Data":"08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6"} Nov 28 16:16:59 crc kubenswrapper[4884]: I1128 16:16:59.796729 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t4j9q" podStartSLOduration=3.226922303 podStartE2EDuration="5.796707248s" podCreationTimestamp="2025-11-28 16:16:54 +0000 UTC" firstStartedPulling="2025-11-28 16:16:56.729629956 +0000 UTC m=+3456.292413757" lastFinishedPulling="2025-11-28 16:16:59.299414851 +0000 UTC m=+3458.862198702" observedRunningTime="2025-11-28 16:16:59.794052753 +0000 UTC m=+3459.356836594" watchObservedRunningTime="2025-11-28 16:16:59.796707248 +0000 UTC m=+3459.359491049" Nov 28 16:17:05 crc kubenswrapper[4884]: I1128 16:17:05.282203 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:17:05 crc kubenswrapper[4884]: I1128 16:17:05.283021 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:17:06 crc kubenswrapper[4884]: I1128 16:17:06.324774 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t4j9q" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="registry-server" probeResult="failure" output=< Nov 28 16:17:06 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 16:17:06 crc kubenswrapper[4884]: > Nov 28 16:17:15 crc kubenswrapper[4884]: I1128 16:17:15.342768 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:17:15 crc kubenswrapper[4884]: I1128 16:17:15.396180 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:17:15 crc kubenswrapper[4884]: I1128 16:17:15.596409 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t4j9q"] Nov 28 16:17:16 crc kubenswrapper[4884]: I1128 16:17:16.914923 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t4j9q" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="registry-server" containerID="cri-o://08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6" gracePeriod=2 Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.369431 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.534945 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-catalog-content\") pod \"41124959-14f0-4d3a-b0e5-213a0940fc54\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.534999 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-utilities\") pod \"41124959-14f0-4d3a-b0e5-213a0940fc54\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.535069 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vrgx\" (UniqueName: \"kubernetes.io/projected/41124959-14f0-4d3a-b0e5-213a0940fc54-kube-api-access-4vrgx\") pod \"41124959-14f0-4d3a-b0e5-213a0940fc54\" (UID: \"41124959-14f0-4d3a-b0e5-213a0940fc54\") " Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.537282 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-utilities" (OuterVolumeSpecName: "utilities") pod "41124959-14f0-4d3a-b0e5-213a0940fc54" (UID: "41124959-14f0-4d3a-b0e5-213a0940fc54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.542870 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41124959-14f0-4d3a-b0e5-213a0940fc54-kube-api-access-4vrgx" (OuterVolumeSpecName: "kube-api-access-4vrgx") pod "41124959-14f0-4d3a-b0e5-213a0940fc54" (UID: "41124959-14f0-4d3a-b0e5-213a0940fc54"). InnerVolumeSpecName "kube-api-access-4vrgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.637020 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.637064 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vrgx\" (UniqueName: \"kubernetes.io/projected/41124959-14f0-4d3a-b0e5-213a0940fc54-kube-api-access-4vrgx\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.712076 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41124959-14f0-4d3a-b0e5-213a0940fc54" (UID: "41124959-14f0-4d3a-b0e5-213a0940fc54"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.738501 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41124959-14f0-4d3a-b0e5-213a0940fc54-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.928553 4884 generic.go:334] "Generic (PLEG): container finished" podID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerID="08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6" exitCode=0 Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.928631 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerDied","Data":"08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6"} Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.928658 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t4j9q" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.928701 4884 scope.go:117] "RemoveContainer" containerID="08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.928683 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t4j9q" event={"ID":"41124959-14f0-4d3a-b0e5-213a0940fc54","Type":"ContainerDied","Data":"c0e02364243a199d592672178bbf457c0b5b37b19a7ec992fb11bb311eb5c781"} Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.956532 4884 scope.go:117] "RemoveContainer" containerID="1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3" Nov 28 16:17:17 crc kubenswrapper[4884]: I1128 16:17:17.991709 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t4j9q"] Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.000164 4884 scope.go:117] "RemoveContainer" containerID="d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27" Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.003716 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t4j9q"] Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.028334 4884 scope.go:117] "RemoveContainer" containerID="08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6" Nov 28 16:17:18 crc kubenswrapper[4884]: E1128 16:17:18.028895 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6\": container with ID starting with 08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6 not found: ID does not exist" containerID="08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6" Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.028931 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6"} err="failed to get container status \"08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6\": rpc error: code = NotFound desc = could not find container \"08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6\": container with ID starting with 08049c1ea7855b3c7efe6a1e34cef9c140cc6821fd93e7cc6fd4fa0333dd46e6 not found: ID does not exist" Nov 28 16:17:18 crc 
kubenswrapper[4884]: I1128 16:17:18.028970 4884 scope.go:117] "RemoveContainer" containerID="1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3" Nov 28 16:17:18 crc kubenswrapper[4884]: E1128 16:17:18.029380 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3\": container with ID starting with 1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3 not found: ID does not exist" containerID="1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3" Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.029423 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3"} err="failed to get container status \"1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3\": rpc error: code = NotFound desc = could not find container \"1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3\": container with ID starting with 1d4f135db3c010624a043841e42313c35017f1c12c304774b0f3676f2fef35b3 not found: ID does not exist" Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.029455 4884 scope.go:117] "RemoveContainer" containerID="d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27" Nov 28 16:17:18 crc kubenswrapper[4884]: E1128 16:17:18.029885 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27\": container with ID starting with d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27 not found: ID does not exist" containerID="d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27" Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.029914 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27"} err="failed to get container status \"d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27\": rpc error: code = NotFound desc = could not find container \"d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27\": container with ID starting with d8e2bec99795f5990b7f72b1329b25df513e9c8d74a7b05055bdc125c5af6f27 not found: ID does not exist" Nov 28 16:17:18 crc kubenswrapper[4884]: I1128 16:17:18.703610 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" path="/var/lib/kubelet/pods/41124959-14f0-4d3a-b0e5-213a0940fc54/volumes" Nov 28 16:17:21 crc kubenswrapper[4884]: I1128 16:17:21.243206 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:17:21 crc kubenswrapper[4884]: I1128 16:17:21.243599 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:17:51 crc kubenswrapper[4884]: I1128 16:17:51.243336 4884 patch_prober.go:28] interesting 
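
The RemoveContainer / NotFound / "DeleteContainer returned error" triplets logged above at 16:17:18 are a benign race: by the time the status lookup runs, CRI-O has already dropped the container, so NotFound effectively means "already deleted". A sketch of that idempotent-removal pattern, with hypothetical helper names rather than kubelet code:

    # Idempotent container removal: treat NotFound as success, since the
    # container being gone is the state we wanted. Names are illustrative.
    class NotFound(Exception):
        pass

    def remove_container(runtime, container_id: str) -> None:
        try:
            runtime.remove(container_id)
        except NotFound:
            # Already gone (e.g. the runtime cleaned it up first); nothing to do.
            pass

    class FakeRuntime:
        def remove(self, cid):  # simulates CRI-O having already deleted it
            raise NotFound(f"could not find container {cid!r}")

    remove_container(FakeRuntime(), "08049c1ea785")  # completes without error
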
Nov 28 16:17:51 crc kubenswrapper[4884]: I1128 16:17:51.243336 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:17:51 crc kubenswrapper[4884]: I1128 16:17:51.243859 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:17:51 crc kubenswrapper[4884]: I1128 16:17:51.243908 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 16:17:51 crc kubenswrapper[4884]: I1128 16:17:51.244582 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ec8a4697997e375bd054ae12a2b413d8a880cda9660a715ce0fe194d42ebc6eb"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:17:51 crc kubenswrapper[4884]: I1128 16:17:51.244643 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://ec8a4697997e375bd054ae12a2b413d8a880cda9660a715ce0fe194d42ebc6eb" gracePeriod=600
Nov 28 16:17:52 crc kubenswrapper[4884]: I1128 16:17:52.213522 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="ec8a4697997e375bd054ae12a2b413d8a880cda9660a715ce0fe194d42ebc6eb" exitCode=0
Nov 28 16:17:52 crc kubenswrapper[4884]: I1128 16:17:52.213583 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"ec8a4697997e375bd054ae12a2b413d8a880cda9660a715ce0fe194d42ebc6eb"}
Nov 28 16:17:52 crc kubenswrapper[4884]: I1128 16:17:52.214301 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede"}
Nov 28 16:17:52 crc kubenswrapper[4884]: I1128 16:17:52.214335 4884 scope.go:117] "RemoveContainer" containerID="0e873a18c0e96317500e22b267d6c1c1b2355c830916daf571b26f3fd35ff325"
Nov 28 16:19:51 crc kubenswrapper[4884]: I1128 16:19:51.243112 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:19:51 crc kubenswrapper[4884]: I1128 16:19:51.243564 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:20:21 crc kubenswrapper[4884]: I1128 16:20:21.243176 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:20:21 crc kubenswrapper[4884]: I1128 16:20:21.243888 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.243378 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.243999 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.244056 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.244801 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.244871 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" gracePeriod=600
Nov 28 16:20:51 crc kubenswrapper[4884]: E1128 16:20:51.369993 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
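
After machine-config-daemon fails its liveness probe a second time within a few minutes, the pod worker parks it in CrashLoopBackOff; the "back-off 5m0s" in the error above is Kubernetes' documented restart back-off, which starts at 10s, doubles per crash, and caps at five minutes (resetting after the container runs cleanly long enough). A sketch of that schedule, as an illustration rather than kubelet code:

    # Kubernetes' documented crash-loop restart back-off: 10s initial delay,
    # doubling per restart, capped at 5 minutes.
    def backoff_delays(initial: float = 10.0, cap: float = 300.0):
        delay = initial
        while True:
            yield min(delay, cap)
            delay *= 2

    gen = backoff_delays()
    print([next(gen) for _ in range(7)])
    # -> [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0] -- hence "back-off 5m0s"
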
event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede"} Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.644683 4884 scope.go:117] "RemoveContainer" containerID="ec8a4697997e375bd054ae12a2b413d8a880cda9660a715ce0fe194d42ebc6eb" Nov 28 16:20:51 crc kubenswrapper[4884]: I1128 16:20:51.645197 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:20:51 crc kubenswrapper[4884]: E1128 16:20:51.645438 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:21:05 crc kubenswrapper[4884]: I1128 16:21:05.689031 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:21:05 crc kubenswrapper[4884]: E1128 16:21:05.689685 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:21:17 crc kubenswrapper[4884]: I1128 16:21:17.688010 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:21:17 crc kubenswrapper[4884]: E1128 16:21:17.688846 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:21:32 crc kubenswrapper[4884]: I1128 16:21:32.689327 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:21:32 crc kubenswrapper[4884]: E1128 16:21:32.690645 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:21:44 crc kubenswrapper[4884]: I1128 16:21:44.688403 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:21:44 crc kubenswrapper[4884]: E1128 16:21:44.689245 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:21:57 crc kubenswrapper[4884]: I1128 16:21:57.688964 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:21:57 crc kubenswrapper[4884]: E1128 16:21:57.689799 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:22:12 crc kubenswrapper[4884]: I1128 16:22:12.688242 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:22:12 crc kubenswrapper[4884]: E1128 16:22:12.701873 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:22:24 crc kubenswrapper[4884]: I1128 16:22:24.688611 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:22:24 crc kubenswrapper[4884]: E1128 16:22:24.689366 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:22:39 crc kubenswrapper[4884]: I1128 16:22:39.689003 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:22:39 crc kubenswrapper[4884]: E1128 16:22:39.689859 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:22:53 crc kubenswrapper[4884]: I1128 16:22:53.689227 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:22:53 crc kubenswrapper[4884]: E1128 16:22:53.690329 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.190532 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xd4hp"] Nov 28 16:23:04 crc kubenswrapper[4884]: E1128 16:23:04.191530 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="extract-content" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.191547 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="extract-content" Nov 28 16:23:04 crc kubenswrapper[4884]: E1128 16:23:04.191567 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="extract-utilities" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.191574 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="extract-utilities" Nov 28 16:23:04 crc kubenswrapper[4884]: E1128 16:23:04.191588 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="registry-server" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.191596 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="registry-server" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.191805 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="41124959-14f0-4d3a-b0e5-213a0940fc54" containerName="registry-server" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.193157 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.197658 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xd4hp"] Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.388794 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-utilities\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.389214 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4s6z\" (UniqueName: \"kubernetes.io/projected/ea79b53c-773e-456b-ae68-e27f59d01fc6-kube-api-access-n4s6z\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.389297 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-catalog-content\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.490716 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-catalog-content\") pod \"certified-operators-xd4hp\" (UID: 
\"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.490777 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-utilities\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.490805 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4s6z\" (UniqueName: \"kubernetes.io/projected/ea79b53c-773e-456b-ae68-e27f59d01fc6-kube-api-access-n4s6z\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.491582 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-catalog-content\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.491891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-utilities\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.747157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4s6z\" (UniqueName: \"kubernetes.io/projected/ea79b53c-773e-456b-ae68-e27f59d01fc6-kube-api-access-n4s6z\") pod \"certified-operators-xd4hp\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:04 crc kubenswrapper[4884]: I1128 16:23:04.828801 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:05 crc kubenswrapper[4884]: I1128 16:23:05.047126 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xd4hp"] Nov 28 16:23:05 crc kubenswrapper[4884]: I1128 16:23:05.618155 4884 generic.go:334] "Generic (PLEG): container finished" podID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerID="4d18c7b38bd8c984a0902b6ec02f9d8a81fb6dc529dc1c170920a7e88b0dc495" exitCode=0 Nov 28 16:23:05 crc kubenswrapper[4884]: I1128 16:23:05.618218 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd4hp" event={"ID":"ea79b53c-773e-456b-ae68-e27f59d01fc6","Type":"ContainerDied","Data":"4d18c7b38bd8c984a0902b6ec02f9d8a81fb6dc529dc1c170920a7e88b0dc495"} Nov 28 16:23:05 crc kubenswrapper[4884]: I1128 16:23:05.618249 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd4hp" event={"ID":"ea79b53c-773e-456b-ae68-e27f59d01fc6","Type":"ContainerStarted","Data":"d5d7d2a1f6a94e794bb8fbd5c7e042be4a1594f8de7c665c37e546c89d9032e3"} Nov 28 16:23:05 crc kubenswrapper[4884]: I1128 16:23:05.623710 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:23:07 crc kubenswrapper[4884]: I1128 16:23:07.635814 4884 generic.go:334] "Generic (PLEG): container finished" podID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerID="c365ace70bb4a75dc89cf4629f5e0907b6a4e97d5b3b3f5a91764a11c1bb12d0" exitCode=0 Nov 28 16:23:07 crc kubenswrapper[4884]: I1128 16:23:07.635876 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd4hp" event={"ID":"ea79b53c-773e-456b-ae68-e27f59d01fc6","Type":"ContainerDied","Data":"c365ace70bb4a75dc89cf4629f5e0907b6a4e97d5b3b3f5a91764a11c1bb12d0"} Nov 28 16:23:07 crc kubenswrapper[4884]: I1128 16:23:07.688912 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:23:07 crc kubenswrapper[4884]: E1128 16:23:07.689232 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.171925 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpfh"] Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.174397 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.180103 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpfh"] Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.249942 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-catalog-content\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.249992 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-utilities\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.250032 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x645\" (UniqueName: \"kubernetes.io/projected/40796361-134c-4f02-84de-f5fa8f18a50c-kube-api-access-2x645\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.352714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-catalog-content\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.352789 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-utilities\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.352843 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x645\" (UniqueName: \"kubernetes.io/projected/40796361-134c-4f02-84de-f5fa8f18a50c-kube-api-access-2x645\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.354235 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-utilities\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.354330 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-catalog-content\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.371843 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2x645\" (UniqueName: \"kubernetes.io/projected/40796361-134c-4f02-84de-f5fa8f18a50c-kube-api-access-2x645\") pod \"redhat-marketplace-bgpfh\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.527857 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.645529 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd4hp" event={"ID":"ea79b53c-773e-456b-ae68-e27f59d01fc6","Type":"ContainerStarted","Data":"0059dba429d185f226b0b3f8e0b8bec474cb10ac7c63f90b31c34fbf3259304e"} Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.676756 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xd4hp" podStartSLOduration=2.205562329 podStartE2EDuration="4.676732036s" podCreationTimestamp="2025-11-28 16:23:04 +0000 UTC" firstStartedPulling="2025-11-28 16:23:05.623426985 +0000 UTC m=+3825.186210806" lastFinishedPulling="2025-11-28 16:23:08.094596702 +0000 UTC m=+3827.657380513" observedRunningTime="2025-11-28 16:23:08.666488085 +0000 UTC m=+3828.229271886" watchObservedRunningTime="2025-11-28 16:23:08.676732036 +0000 UTC m=+3828.239515837" Nov 28 16:23:08 crc kubenswrapper[4884]: I1128 16:23:08.972191 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpfh"] Nov 28 16:23:09 crc kubenswrapper[4884]: I1128 16:23:09.656988 4884 generic.go:334] "Generic (PLEG): container finished" podID="40796361-134c-4f02-84de-f5fa8f18a50c" containerID="949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89" exitCode=0 Nov 28 16:23:09 crc kubenswrapper[4884]: I1128 16:23:09.657103 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpfh" event={"ID":"40796361-134c-4f02-84de-f5fa8f18a50c","Type":"ContainerDied","Data":"949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89"} Nov 28 16:23:09 crc kubenswrapper[4884]: I1128 16:23:09.657428 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpfh" event={"ID":"40796361-134c-4f02-84de-f5fa8f18a50c","Type":"ContainerStarted","Data":"ef0a00866b3e7638745cad0626697d769812223d2c5d138c19f01b3bfb3fd9ac"} Nov 28 16:23:11 crc kubenswrapper[4884]: I1128 16:23:11.674438 4884 generic.go:334] "Generic (PLEG): container finished" podID="40796361-134c-4f02-84de-f5fa8f18a50c" containerID="41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216" exitCode=0 Nov 28 16:23:11 crc kubenswrapper[4884]: I1128 16:23:11.674534 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpfh" event={"ID":"40796361-134c-4f02-84de-f5fa8f18a50c","Type":"ContainerDied","Data":"41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216"} Nov 28 16:23:13 crc kubenswrapper[4884]: I1128 16:23:13.691643 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpfh" event={"ID":"40796361-134c-4f02-84de-f5fa8f18a50c","Type":"ContainerStarted","Data":"fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7"} Nov 28 16:23:13 crc kubenswrapper[4884]: I1128 16:23:13.711854 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-bgpfh" podStartSLOduration=2.467126673 podStartE2EDuration="5.711831592s" podCreationTimestamp="2025-11-28 16:23:08 +0000 UTC" firstStartedPulling="2025-11-28 16:23:09.659662873 +0000 UTC m=+3829.222446694" lastFinishedPulling="2025-11-28 16:23:12.904367812 +0000 UTC m=+3832.467151613" observedRunningTime="2025-11-28 16:23:13.70845749 +0000 UTC m=+3833.271241291" watchObservedRunningTime="2025-11-28 16:23:13.711831592 +0000 UTC m=+3833.274615393" Nov 28 16:23:14 crc kubenswrapper[4884]: I1128 16:23:14.829725 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:14 crc kubenswrapper[4884]: I1128 16:23:14.830587 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:15 crc kubenswrapper[4884]: I1128 16:23:15.173591 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:15 crc kubenswrapper[4884]: I1128 16:23:15.746986 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:15 crc kubenswrapper[4884]: I1128 16:23:15.791920 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xd4hp"] Nov 28 16:23:17 crc kubenswrapper[4884]: I1128 16:23:17.716583 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xd4hp" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="registry-server" containerID="cri-o://0059dba429d185f226b0b3f8e0b8bec474cb10ac7c63f90b31c34fbf3259304e" gracePeriod=2 Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.528488 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.528585 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.585652 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.689036 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:23:18 crc kubenswrapper[4884]: E1128 16:23:18.689616 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.723950 4884 generic.go:334] "Generic (PLEG): container finished" podID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerID="0059dba429d185f226b0b3f8e0b8bec474cb10ac7c63f90b31c34fbf3259304e" exitCode=0 Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.723992 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd4hp" 
event={"ID":"ea79b53c-773e-456b-ae68-e27f59d01fc6","Type":"ContainerDied","Data":"0059dba429d185f226b0b3f8e0b8bec474cb10ac7c63f90b31c34fbf3259304e"} Nov 28 16:23:18 crc kubenswrapper[4884]: I1128 16:23:18.766109 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.154650 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpfh"] Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.259291 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.404873 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4s6z\" (UniqueName: \"kubernetes.io/projected/ea79b53c-773e-456b-ae68-e27f59d01fc6-kube-api-access-n4s6z\") pod \"ea79b53c-773e-456b-ae68-e27f59d01fc6\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.404942 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-catalog-content\") pod \"ea79b53c-773e-456b-ae68-e27f59d01fc6\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.404981 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-utilities\") pod \"ea79b53c-773e-456b-ae68-e27f59d01fc6\" (UID: \"ea79b53c-773e-456b-ae68-e27f59d01fc6\") " Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.405990 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-utilities" (OuterVolumeSpecName: "utilities") pod "ea79b53c-773e-456b-ae68-e27f59d01fc6" (UID: "ea79b53c-773e-456b-ae68-e27f59d01fc6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.410072 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea79b53c-773e-456b-ae68-e27f59d01fc6-kube-api-access-n4s6z" (OuterVolumeSpecName: "kube-api-access-n4s6z") pod "ea79b53c-773e-456b-ae68-e27f59d01fc6" (UID: "ea79b53c-773e-456b-ae68-e27f59d01fc6"). InnerVolumeSpecName "kube-api-access-n4s6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.461340 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea79b53c-773e-456b-ae68-e27f59d01fc6" (UID: "ea79b53c-773e-456b-ae68-e27f59d01fc6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.508575 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4s6z\" (UniqueName: \"kubernetes.io/projected/ea79b53c-773e-456b-ae68-e27f59d01fc6-kube-api-access-n4s6z\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.508624 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.508637 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea79b53c-773e-456b-ae68-e27f59d01fc6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.734913 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xd4hp" event={"ID":"ea79b53c-773e-456b-ae68-e27f59d01fc6","Type":"ContainerDied","Data":"d5d7d2a1f6a94e794bb8fbd5c7e042be4a1594f8de7c665c37e546c89d9032e3"} Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.735001 4884 scope.go:117] "RemoveContainer" containerID="0059dba429d185f226b0b3f8e0b8bec474cb10ac7c63f90b31c34fbf3259304e" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.735025 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xd4hp" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.766097 4884 scope.go:117] "RemoveContainer" containerID="c365ace70bb4a75dc89cf4629f5e0907b6a4e97d5b3b3f5a91764a11c1bb12d0" Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.782732 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xd4hp"] Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.789394 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xd4hp"] Nov 28 16:23:19 crc kubenswrapper[4884]: I1128 16:23:19.813386 4884 scope.go:117] "RemoveContainer" containerID="4d18c7b38bd8c984a0902b6ec02f9d8a81fb6dc529dc1c170920a7e88b0dc495" Nov 28 16:23:20 crc kubenswrapper[4884]: I1128 16:23:20.697422 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" path="/var/lib/kubelet/pods/ea79b53c-773e-456b-ae68-e27f59d01fc6/volumes" Nov 28 16:23:20 crc kubenswrapper[4884]: I1128 16:23:20.742108 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bgpfh" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="registry-server" containerID="cri-o://fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7" gracePeriod=2 Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.129831 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.130892 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2x645\" (UniqueName: \"kubernetes.io/projected/40796361-134c-4f02-84de-f5fa8f18a50c-kube-api-access-2x645\") pod \"40796361-134c-4f02-84de-f5fa8f18a50c\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.130960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-catalog-content\") pod \"40796361-134c-4f02-84de-f5fa8f18a50c\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.131004 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-utilities\") pod \"40796361-134c-4f02-84de-f5fa8f18a50c\" (UID: \"40796361-134c-4f02-84de-f5fa8f18a50c\") " Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.131896 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-utilities" (OuterVolumeSpecName: "utilities") pod "40796361-134c-4f02-84de-f5fa8f18a50c" (UID: "40796361-134c-4f02-84de-f5fa8f18a50c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.135010 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40796361-134c-4f02-84de-f5fa8f18a50c-kube-api-access-2x645" (OuterVolumeSpecName: "kube-api-access-2x645") pod "40796361-134c-4f02-84de-f5fa8f18a50c" (UID: "40796361-134c-4f02-84de-f5fa8f18a50c"). InnerVolumeSpecName "kube-api-access-2x645". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.156014 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40796361-134c-4f02-84de-f5fa8f18a50c" (UID: "40796361-134c-4f02-84de-f5fa8f18a50c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.231960 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.232002 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40796361-134c-4f02-84de-f5fa8f18a50c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.232015 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2x645\" (UniqueName: \"kubernetes.io/projected/40796361-134c-4f02-84de-f5fa8f18a50c-kube-api-access-2x645\") on node \"crc\" DevicePath \"\"" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.772722 4884 generic.go:334] "Generic (PLEG): container finished" podID="40796361-134c-4f02-84de-f5fa8f18a50c" containerID="fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7" exitCode=0 Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.772801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpfh" event={"ID":"40796361-134c-4f02-84de-f5fa8f18a50c","Type":"ContainerDied","Data":"fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7"} Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.772824 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bgpfh" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.773278 4884 scope.go:117] "RemoveContainer" containerID="fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.787488 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bgpfh" event={"ID":"40796361-134c-4f02-84de-f5fa8f18a50c","Type":"ContainerDied","Data":"ef0a00866b3e7638745cad0626697d769812223d2c5d138c19f01b3bfb3fd9ac"} Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.811444 4884 scope.go:117] "RemoveContainer" containerID="41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.816523 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpfh"] Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.822781 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bgpfh"] Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.832675 4884 scope.go:117] "RemoveContainer" containerID="949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.853754 4884 scope.go:117] "RemoveContainer" containerID="fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7" Nov 28 16:23:21 crc kubenswrapper[4884]: E1128 16:23:21.854292 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7\": container with ID starting with fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7 not found: ID does not exist" containerID="fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.854342 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7"} err="failed to get container status \"fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7\": rpc error: code = NotFound desc = could not find container \"fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7\": container with ID starting with fe08262c0290bfd6d4636696bb7f4c29c9b5642d5f9e0d67c1b37db8477aacd7 not found: ID does not exist" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.854376 4884 scope.go:117] "RemoveContainer" containerID="41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216" Nov 28 16:23:21 crc kubenswrapper[4884]: E1128 16:23:21.854670 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216\": container with ID starting with 41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216 not found: ID does not exist" containerID="41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.854713 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216"} err="failed to get container status \"41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216\": rpc error: code = NotFound desc = could not find container \"41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216\": container with ID starting with 41bcc97791fce11417acc7fc56a15887ca7e09e0bd2c210f5b78fac389ec8216 not found: ID does not exist" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.854735 4884 scope.go:117] "RemoveContainer" containerID="949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89" Nov 28 16:23:21 crc kubenswrapper[4884]: E1128 16:23:21.854934 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89\": container with ID starting with 949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89 not found: ID does not exist" containerID="949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89" Nov 28 16:23:21 crc kubenswrapper[4884]: I1128 16:23:21.854963 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89"} err="failed to get container status \"949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89\": rpc error: code = NotFound desc = could not find container \"949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89\": container with ID starting with 949206a755804fb49f885277038a47ced7f6c53c4a48c5cabcdb7eed87d82a89 not found: ID does not exist" Nov 28 16:23:22 crc kubenswrapper[4884]: I1128 16:23:22.696704 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" path="/var/lib/kubelet/pods/40796361-134c-4f02-84de-f5fa8f18a50c/volumes" Nov 28 16:23:29 crc kubenswrapper[4884]: I1128 16:23:29.689027 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:23:29 crc kubenswrapper[4884]: E1128 16:23:29.690050 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:23:44 crc kubenswrapper[4884]: I1128 16:23:44.688379 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:23:44 crc kubenswrapper[4884]: E1128 16:23:44.689377 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:23:57 crc kubenswrapper[4884]: I1128 16:23:57.689030 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:23:57 crc kubenswrapper[4884]: E1128 16:23:57.689778 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:24:08 crc kubenswrapper[4884]: I1128 16:24:08.688487 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:24:08 crc kubenswrapper[4884]: E1128 16:24:08.689219 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:24:20 crc kubenswrapper[4884]: I1128 16:24:20.692301 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:24:20 crc kubenswrapper[4884]: E1128 16:24:20.692963 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:24:35 crc kubenswrapper[4884]: I1128 16:24:35.687629 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:24:35 crc kubenswrapper[4884]: E1128 16:24:35.688459 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:24:50 crc kubenswrapper[4884]: I1128 16:24:50.699840 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:24:50 crc kubenswrapper[4884]: E1128 16:24:50.701218 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:25:02 crc kubenswrapper[4884]: I1128 16:25:02.688459 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:25:02 crc kubenswrapper[4884]: E1128 16:25:02.689463 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:25:15 crc kubenswrapper[4884]: I1128 16:25:15.688568 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:25:15 crc kubenswrapper[4884]: E1128 16:25:15.689322 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:25:28 crc kubenswrapper[4884]: I1128 16:25:28.689038 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:25:28 crc kubenswrapper[4884]: E1128 16:25:28.689878 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:25:40 crc kubenswrapper[4884]: I1128 16:25:40.694365 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:25:40 crc kubenswrapper[4884]: E1128 16:25:40.695161 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:25:54 crc kubenswrapper[4884]: I1128 16:25:54.688863 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede" Nov 28 16:25:54 crc kubenswrapper[4884]: I1128 16:25:54.932524 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"7cb690b0c356e59caba6c0b229124cc36e421ca04e3afe7ea8c7dc8ca9bd9030"} Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.161833 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-466rd"] Nov 28 16:26:59 crc kubenswrapper[4884]: E1128 16:26:59.162715 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="registry-server" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.162730 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="registry-server" Nov 28 16:26:59 crc kubenswrapper[4884]: E1128 16:26:59.162746 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="registry-server" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.162753 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="registry-server" Nov 28 16:26:59 crc kubenswrapper[4884]: E1128 16:26:59.162766 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="extract-content" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.162773 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="extract-content" Nov 28 16:26:59 crc kubenswrapper[4884]: E1128 16:26:59.162792 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="extract-utilities" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.162801 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="extract-utilities" Nov 28 16:26:59 crc kubenswrapper[4884]: E1128 16:26:59.162812 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="extract-content" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.162819 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="extract-content" Nov 28 16:26:59 crc kubenswrapper[4884]: E1128 16:26:59.162841 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="extract-utilities" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.162849 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="extract-utilities" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.163013 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="40796361-134c-4f02-84de-f5fa8f18a50c" containerName="registry-server" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.163031 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea79b53c-773e-456b-ae68-e27f59d01fc6" containerName="registry-server" Nov 28 16:26:59 crc 
kubenswrapper[4884]: I1128 16:26:59.164309 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.180795 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-466rd"] Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.264103 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-utilities\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.264165 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cg7b\" (UniqueName: \"kubernetes.io/projected/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-kube-api-access-6cg7b\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.264203 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-catalog-content\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.365685 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-catalog-content\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.365819 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-utilities\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.365847 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cg7b\" (UniqueName: \"kubernetes.io/projected/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-kube-api-access-6cg7b\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.366472 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-catalog-content\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.366544 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-utilities\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 
28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.391038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cg7b\" (UniqueName: \"kubernetes.io/projected/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-kube-api-access-6cg7b\") pod \"community-operators-466rd\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.489215 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:26:59 crc kubenswrapper[4884]: I1128 16:26:59.983552 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-466rd"] Nov 28 16:27:00 crc kubenswrapper[4884]: I1128 16:27:00.425958 4884 generic.go:334] "Generic (PLEG): container finished" podID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerID="a7bbcada2f900a85f6e27b6a34116b4a58d2dd71c768f9a2998adaefcfba497e" exitCode=0 Nov 28 16:27:00 crc kubenswrapper[4884]: I1128 16:27:00.426435 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-466rd" event={"ID":"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29","Type":"ContainerDied","Data":"a7bbcada2f900a85f6e27b6a34116b4a58d2dd71c768f9a2998adaefcfba497e"} Nov 28 16:27:00 crc kubenswrapper[4884]: I1128 16:27:00.426676 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-466rd" event={"ID":"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29","Type":"ContainerStarted","Data":"3f0f88f169c6ed0a8adcdc358e7985883c5a1ce4af7e9ec463a68cebb94e54d8"} Nov 28 16:27:02 crc kubenswrapper[4884]: I1128 16:27:02.440533 4884 generic.go:334] "Generic (PLEG): container finished" podID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerID="1cc2c4933f4c6da297937aedf29b9800584141ea6ab1314816ff0943c3b6f8a4" exitCode=0 Nov 28 16:27:02 crc kubenswrapper[4884]: I1128 16:27:02.440628 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-466rd" event={"ID":"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29","Type":"ContainerDied","Data":"1cc2c4933f4c6da297937aedf29b9800584141ea6ab1314816ff0943c3b6f8a4"} Nov 28 16:27:03 crc kubenswrapper[4884]: I1128 16:27:03.453100 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-466rd" event={"ID":"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29","Type":"ContainerStarted","Data":"08b12a8bfc7f09df00665058cd4249c38eef1b0345768651441e649829e69fba"} Nov 28 16:27:03 crc kubenswrapper[4884]: I1128 16:27:03.477064 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-466rd" podStartSLOduration=1.983299368 podStartE2EDuration="4.477048008s" podCreationTimestamp="2025-11-28 16:26:59 +0000 UTC" firstStartedPulling="2025-11-28 16:27:00.428650947 +0000 UTC m=+4059.991434798" lastFinishedPulling="2025-11-28 16:27:02.922399637 +0000 UTC m=+4062.485183438" observedRunningTime="2025-11-28 16:27:03.473395428 +0000 UTC m=+4063.036179229" watchObservedRunningTime="2025-11-28 16:27:03.477048008 +0000 UTC m=+4063.039831809" Nov 28 16:27:09 crc kubenswrapper[4884]: I1128 16:27:09.489570 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:27:09 crc kubenswrapper[4884]: I1128 16:27:09.490356 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-466rd" Nov 28 16:27:09 crc kubenswrapper[4884]: I1128 16:27:09.783851 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:27:10 crc kubenswrapper[4884]: I1128 16:27:10.547851 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:27:10 crc kubenswrapper[4884]: I1128 16:27:10.598705 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-466rd"] Nov 28 16:27:12 crc kubenswrapper[4884]: I1128 16:27:12.517596 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-466rd" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="registry-server" containerID="cri-o://08b12a8bfc7f09df00665058cd4249c38eef1b0345768651441e649829e69fba" gracePeriod=2 Nov 28 16:27:13 crc kubenswrapper[4884]: I1128 16:27:13.527040 4884 generic.go:334] "Generic (PLEG): container finished" podID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerID="08b12a8bfc7f09df00665058cd4249c38eef1b0345768651441e649829e69fba" exitCode=0 Nov 28 16:27:13 crc kubenswrapper[4884]: I1128 16:27:13.527261 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-466rd" event={"ID":"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29","Type":"ContainerDied","Data":"08b12a8bfc7f09df00665058cd4249c38eef1b0345768651441e649829e69fba"} Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.052149 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.068155 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-utilities\") pod \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.068237 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-catalog-content\") pod \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.068317 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cg7b\" (UniqueName: \"kubernetes.io/projected/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-kube-api-access-6cg7b\") pod \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\" (UID: \"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29\") " Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.072587 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-utilities" (OuterVolumeSpecName: "utilities") pod "dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" (UID: "dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.078373 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-kube-api-access-6cg7b" (OuterVolumeSpecName: "kube-api-access-6cg7b") pod "dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" (UID: "dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29"). InnerVolumeSpecName "kube-api-access-6cg7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.141281 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" (UID: "dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.170087 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.170162 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cg7b\" (UniqueName: \"kubernetes.io/projected/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-kube-api-access-6cg7b\") on node \"crc\" DevicePath \"\"" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.170183 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.537575 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-466rd" event={"ID":"dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29","Type":"ContainerDied","Data":"3f0f88f169c6ed0a8adcdc358e7985883c5a1ce4af7e9ec463a68cebb94e54d8"} Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.537666 4884 scope.go:117] "RemoveContainer" containerID="08b12a8bfc7f09df00665058cd4249c38eef1b0345768651441e649829e69fba" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.537671 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-466rd" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.557075 4884 scope.go:117] "RemoveContainer" containerID="1cc2c4933f4c6da297937aedf29b9800584141ea6ab1314816ff0943c3b6f8a4" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.577913 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-466rd"] Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.587915 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-466rd"] Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.598290 4884 scope.go:117] "RemoveContainer" containerID="a7bbcada2f900a85f6e27b6a34116b4a58d2dd71c768f9a2998adaefcfba497e" Nov 28 16:27:14 crc kubenswrapper[4884]: I1128 16:27:14.699305 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" path="/var/lib/kubelet/pods/dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29/volumes" Nov 28 16:28:21 crc kubenswrapper[4884]: I1128 16:28:21.243583 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:28:21 crc kubenswrapper[4884]: I1128 16:28:21.244185 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.036138 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rtzc5"] Nov 28 16:28:51 crc kubenswrapper[4884]: E1128 16:28:51.036929 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="extract-utilities" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.036941 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="extract-utilities" Nov 28 16:28:51 crc kubenswrapper[4884]: E1128 16:28:51.036951 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="extract-content" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.036957 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="extract-content" Nov 28 16:28:51 crc kubenswrapper[4884]: E1128 16:28:51.036969 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="registry-server" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.036976 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="registry-server" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.037144 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfc3ae61-0c4e-4cfd-bf64-d6fba6731c29" containerName="registry-server" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.038123 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.052641 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rtzc5"] Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.201938 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkw78\" (UniqueName: \"kubernetes.io/projected/f7779862-944c-457e-8fc8-ca5b2ab8c053-kube-api-access-qkw78\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.202002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-catalog-content\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.202060 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-utilities\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.243367 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.243429 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.304067 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-utilities\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.304227 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkw78\" (UniqueName: \"kubernetes.io/projected/f7779862-944c-457e-8fc8-ca5b2ab8c053-kube-api-access-qkw78\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.304252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-catalog-content\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.304908 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-catalog-content\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.305012 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-utilities\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.332392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkw78\" (UniqueName: \"kubernetes.io/projected/f7779862-944c-457e-8fc8-ca5b2ab8c053-kube-api-access-qkw78\") pod \"redhat-operators-rtzc5\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") " pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.357912 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:28:51 crc kubenswrapper[4884]: I1128 16:28:51.775713 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rtzc5"] Nov 28 16:28:52 crc kubenswrapper[4884]: I1128 16:28:52.284506 4884 generic.go:334] "Generic (PLEG): container finished" podID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerID="f686f2bf9041545be4a9f9ccbab4e55e3caff1ee23c623cfbc23160f787b66fc" exitCode=0 Nov 28 16:28:52 crc kubenswrapper[4884]: I1128 16:28:52.284786 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rtzc5" event={"ID":"f7779862-944c-457e-8fc8-ca5b2ab8c053","Type":"ContainerDied","Data":"f686f2bf9041545be4a9f9ccbab4e55e3caff1ee23c623cfbc23160f787b66fc"} Nov 28 16:28:52 crc kubenswrapper[4884]: I1128 16:28:52.284810 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rtzc5" event={"ID":"f7779862-944c-457e-8fc8-ca5b2ab8c053","Type":"ContainerStarted","Data":"40ede6bb0af2272336cf9bdaae4e34a217858a85abbe78acbce9388ee1bb6446"} Nov 28 16:28:52 crc kubenswrapper[4884]: I1128 16:28:52.286972 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:28:54 crc kubenswrapper[4884]: I1128 16:28:54.301895 4884 generic.go:334] "Generic (PLEG): container finished" podID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerID="b4f6dcdd0d0887e9c47057468b4519789df40db8afe5f56b2b157897ee64cefd" exitCode=0 Nov 28 16:28:54 crc kubenswrapper[4884]: I1128 16:28:54.301976 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rtzc5" event={"ID":"f7779862-944c-457e-8fc8-ca5b2ab8c053","Type":"ContainerDied","Data":"b4f6dcdd0d0887e9c47057468b4519789df40db8afe5f56b2b157897ee64cefd"} Nov 28 16:28:55 crc kubenswrapper[4884]: I1128 16:28:55.309867 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rtzc5" event={"ID":"f7779862-944c-457e-8fc8-ca5b2ab8c053","Type":"ContainerStarted","Data":"1e4bb7f4f29479e91753c9b30ff7a78f0b1f2f4867321d0542f7f383d77126a0"} Nov 28 16:28:55 crc kubenswrapper[4884]: I1128 16:28:55.333202 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rtzc5" podStartSLOduration=1.879287608 
podStartE2EDuration="4.333181749s" podCreationTimestamp="2025-11-28 16:28:51 +0000 UTC" firstStartedPulling="2025-11-28 16:28:52.286729526 +0000 UTC m=+4171.849513327" lastFinishedPulling="2025-11-28 16:28:54.740623667 +0000 UTC m=+4174.303407468" observedRunningTime="2025-11-28 16:28:55.326184677 +0000 UTC m=+4174.888968498" watchObservedRunningTime="2025-11-28 16:28:55.333181749 +0000 UTC m=+4174.895965550" Nov 28 16:29:01 crc kubenswrapper[4884]: I1128 16:29:01.358261 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:29:01 crc kubenswrapper[4884]: I1128 16:29:01.359639 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:29:01 crc kubenswrapper[4884]: I1128 16:29:01.400037 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:29:02 crc kubenswrapper[4884]: I1128 16:29:02.401126 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rtzc5" Nov 28 16:29:02 crc kubenswrapper[4884]: I1128 16:29:02.457270 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rtzc5"] Nov 28 16:29:04 crc kubenswrapper[4884]: I1128 16:29:04.374879 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rtzc5" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="registry-server" containerID="cri-o://1e4bb7f4f29479e91753c9b30ff7a78f0b1f2f4867321d0542f7f383d77126a0" gracePeriod=2 Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.395892 4884 generic.go:334] "Generic (PLEG): container finished" podID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerID="1e4bb7f4f29479e91753c9b30ff7a78f0b1f2f4867321d0542f7f383d77126a0" exitCode=0 Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.395931 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rtzc5" event={"ID":"f7779862-944c-457e-8fc8-ca5b2ab8c053","Type":"ContainerDied","Data":"1e4bb7f4f29479e91753c9b30ff7a78f0b1f2f4867321d0542f7f383d77126a0"} Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.481743 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rtzc5"
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.522244 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-utilities\") pod \"f7779862-944c-457e-8fc8-ca5b2ab8c053\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") "
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.522282 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-catalog-content\") pod \"f7779862-944c-457e-8fc8-ca5b2ab8c053\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") "
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.522308 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkw78\" (UniqueName: \"kubernetes.io/projected/f7779862-944c-457e-8fc8-ca5b2ab8c053-kube-api-access-qkw78\") pod \"f7779862-944c-457e-8fc8-ca5b2ab8c053\" (UID: \"f7779862-944c-457e-8fc8-ca5b2ab8c053\") "
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.525857 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-utilities" (OuterVolumeSpecName: "utilities") pod "f7779862-944c-457e-8fc8-ca5b2ab8c053" (UID: "f7779862-944c-457e-8fc8-ca5b2ab8c053"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.528333 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7779862-944c-457e-8fc8-ca5b2ab8c053-kube-api-access-qkw78" (OuterVolumeSpecName: "kube-api-access-qkw78") pod "f7779862-944c-457e-8fc8-ca5b2ab8c053" (UID: "f7779862-944c-457e-8fc8-ca5b2ab8c053"). InnerVolumeSpecName "kube-api-access-qkw78". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.623171 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkw78\" (UniqueName: \"kubernetes.io/projected/f7779862-944c-457e-8fc8-ca5b2ab8c053-kube-api-access-qkw78\") on node \"crc\" DevicePath \"\""
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.623208 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.643458 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7779862-944c-457e-8fc8-ca5b2ab8c053" (UID: "f7779862-944c-457e-8fc8-ca5b2ab8c053"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:29:07 crc kubenswrapper[4884]: I1128 16:29:07.724749 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7779862-944c-457e-8fc8-ca5b2ab8c053-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.406680 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rtzc5" event={"ID":"f7779862-944c-457e-8fc8-ca5b2ab8c053","Type":"ContainerDied","Data":"40ede6bb0af2272336cf9bdaae4e34a217858a85abbe78acbce9388ee1bb6446"}
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.406729 4884 scope.go:117] "RemoveContainer" containerID="1e4bb7f4f29479e91753c9b30ff7a78f0b1f2f4867321d0542f7f383d77126a0"
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.406745 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rtzc5"
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.433313 4884 scope.go:117] "RemoveContainer" containerID="b4f6dcdd0d0887e9c47057468b4519789df40db8afe5f56b2b157897ee64cefd"
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.451869 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rtzc5"]
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.463762 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rtzc5"]
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.469945 4884 scope.go:117] "RemoveContainer" containerID="f686f2bf9041545be4a9f9ccbab4e55e3caff1ee23c623cfbc23160f787b66fc"
Nov 28 16:29:08 crc kubenswrapper[4884]: I1128 16:29:08.700925 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" path="/var/lib/kubelet/pods/f7779862-944c-457e-8fc8-ca5b2ab8c053/volumes"
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.242718 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.243303 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.243348 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.243897 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7cb690b0c356e59caba6c0b229124cc36e421ca04e3afe7ea8c7dc8ca9bd9030"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.243953 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://7cb690b0c356e59caba6c0b229124cc36e421ca04e3afe7ea8c7dc8ca9bd9030" gracePeriod=600
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.510640 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="7cb690b0c356e59caba6c0b229124cc36e421ca04e3afe7ea8c7dc8ca9bd9030" exitCode=0
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.510714 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"7cb690b0c356e59caba6c0b229124cc36e421ca04e3afe7ea8c7dc8ca9bd9030"}
Nov 28 16:29:21 crc kubenswrapper[4884]: I1128 16:29:21.510916 4884 scope.go:117] "RemoveContainer" containerID="482eec9ca0399abfce9cfbc816ba300087666eca35e2359e299c2889fc03dede"
Nov 28 16:29:22 crc kubenswrapper[4884]: I1128 16:29:22.518603 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"}
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.184899 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"]
Nov 28 16:30:00 crc kubenswrapper[4884]: E1128 16:30:00.185755 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="extract-content"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.185769 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="extract-content"
Nov 28 16:30:00 crc kubenswrapper[4884]: E1128 16:30:00.185799 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="extract-utilities"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.185806 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="extract-utilities"
Nov 28 16:30:00 crc kubenswrapper[4884]: E1128 16:30:00.185823 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="registry-server"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.185829 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="registry-server"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.185961 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7779862-944c-457e-8fc8-ca5b2ab8c053" containerName="registry-server"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.186595 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.193365 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.193632 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"]
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.193398 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.307653 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e3ebe02-37c4-4e44-b874-ef4cb3189717-secret-volume\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.307722 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flkpr\" (UniqueName: \"kubernetes.io/projected/4e3ebe02-37c4-4e44-b874-ef4cb3189717-kube-api-access-flkpr\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.307756 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e3ebe02-37c4-4e44-b874-ef4cb3189717-config-volume\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.409512 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e3ebe02-37c4-4e44-b874-ef4cb3189717-secret-volume\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.409872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flkpr\" (UniqueName: \"kubernetes.io/projected/4e3ebe02-37c4-4e44-b874-ef4cb3189717-kube-api-access-flkpr\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.410001 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e3ebe02-37c4-4e44-b874-ef4cb3189717-config-volume\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.411312 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e3ebe02-37c4-4e44-b874-ef4cb3189717-config-volume\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.417888 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e3ebe02-37c4-4e44-b874-ef4cb3189717-secret-volume\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.427139 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flkpr\" (UniqueName: \"kubernetes.io/projected/4e3ebe02-37c4-4e44-b874-ef4cb3189717-kube-api-access-flkpr\") pod \"collect-profiles-29405790-vjgb4\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.505478 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:00 crc kubenswrapper[4884]: I1128 16:30:00.956943 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"]
Nov 28 16:30:01 crc kubenswrapper[4884]: I1128 16:30:01.808467 4884 generic.go:334] "Generic (PLEG): container finished" podID="4e3ebe02-37c4-4e44-b874-ef4cb3189717" containerID="eb9c0ea9c72eed46045fe53d2be5ff16a894be034add7c6cb1ddf1a52794d064" exitCode=0
Nov 28 16:30:01 crc kubenswrapper[4884]: I1128 16:30:01.808567 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4" event={"ID":"4e3ebe02-37c4-4e44-b874-ef4cb3189717","Type":"ContainerDied","Data":"eb9c0ea9c72eed46045fe53d2be5ff16a894be034add7c6cb1ddf1a52794d064"}
Nov 28 16:30:01 crc kubenswrapper[4884]: I1128 16:30:01.808747 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4" event={"ID":"4e3ebe02-37c4-4e44-b874-ef4cb3189717","Type":"ContainerStarted","Data":"2eb4d8de3fcc0d1b7867ba3cd88c49c08bf767cc4ebb6f421925ad9078151dc4"}
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.076224 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.151617 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e3ebe02-37c4-4e44-b874-ef4cb3189717-config-volume\") pod \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") "
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.151715 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e3ebe02-37c4-4e44-b874-ef4cb3189717-secret-volume\") pod \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") "
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.151779 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flkpr\" (UniqueName: \"kubernetes.io/projected/4e3ebe02-37c4-4e44-b874-ef4cb3189717-kube-api-access-flkpr\") pod \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\" (UID: \"4e3ebe02-37c4-4e44-b874-ef4cb3189717\") "
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.152846 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e3ebe02-37c4-4e44-b874-ef4cb3189717-config-volume" (OuterVolumeSpecName: "config-volume") pod "4e3ebe02-37c4-4e44-b874-ef4cb3189717" (UID: "4e3ebe02-37c4-4e44-b874-ef4cb3189717"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.157917 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e3ebe02-37c4-4e44-b874-ef4cb3189717-kube-api-access-flkpr" (OuterVolumeSpecName: "kube-api-access-flkpr") pod "4e3ebe02-37c4-4e44-b874-ef4cb3189717" (UID: "4e3ebe02-37c4-4e44-b874-ef4cb3189717"). InnerVolumeSpecName "kube-api-access-flkpr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.173394 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e3ebe02-37c4-4e44-b874-ef4cb3189717-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4e3ebe02-37c4-4e44-b874-ef4cb3189717" (UID: "4e3ebe02-37c4-4e44-b874-ef4cb3189717"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.253431 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flkpr\" (UniqueName: \"kubernetes.io/projected/4e3ebe02-37c4-4e44-b874-ef4cb3189717-kube-api-access-flkpr\") on node \"crc\" DevicePath \"\""
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.253474 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e3ebe02-37c4-4e44-b874-ef4cb3189717-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.253486 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e3ebe02-37c4-4e44-b874-ef4cb3189717-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.824787 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4" event={"ID":"4e3ebe02-37c4-4e44-b874-ef4cb3189717","Type":"ContainerDied","Data":"2eb4d8de3fcc0d1b7867ba3cd88c49c08bf767cc4ebb6f421925ad9078151dc4"}
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.824840 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2eb4d8de3fcc0d1b7867ba3cd88c49c08bf767cc4ebb6f421925ad9078151dc4"
Nov 28 16:30:03 crc kubenswrapper[4884]: I1128 16:30:03.824916 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"
Nov 28 16:30:04 crc kubenswrapper[4884]: I1128 16:30:04.165903 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw"]
Nov 28 16:30:04 crc kubenswrapper[4884]: I1128 16:30:04.170495 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-ltflw"]
Nov 28 16:30:04 crc kubenswrapper[4884]: I1128 16:30:04.701657 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e6dd427-4110-4ec9-89b0-d94002c9a7f4" path="/var/lib/kubelet/pods/0e6dd427-4110-4ec9-89b0-d94002c9a7f4/volumes"
Nov 28 16:30:28 crc kubenswrapper[4884]: I1128 16:30:28.818772 4884 scope.go:117] "RemoveContainer" containerID="358d90be41536924816d57726ed5b634f19d2f1d18317aac8fb9bc70d889c19f"
Nov 28 16:31:51 crc kubenswrapper[4884]: I1128 16:31:51.243151 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:31:51 crc kubenswrapper[4884]: I1128 16:31:51.243755 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:32:21 crc kubenswrapper[4884]: I1128 16:32:21.243515 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:32:21 crc kubenswrapper[4884]: I1128 16:32:21.244129 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.243214 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.243849 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.243904 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.244497 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.244555 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" gracePeriod=600
Nov 28 16:32:51 crc kubenswrapper[4884]: E1128 16:32:51.373911 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.541316 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" exitCode=0
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.541361 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"}
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.541399 4884 scope.go:117] "RemoveContainer" containerID="7cb690b0c356e59caba6c0b229124cc36e421ca04e3afe7ea8c7dc8ca9bd9030"
Nov 28 16:32:51 crc kubenswrapper[4884]: I1128 16:32:51.541908 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:32:51 crc kubenswrapper[4884]: E1128 16:32:51.542159 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:33:04 crc kubenswrapper[4884]: I1128 16:33:04.690081 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:33:04 crc kubenswrapper[4884]: E1128 16:33:04.690818 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:33:16 crc kubenswrapper[4884]: I1128 16:33:16.689564 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:33:16 crc kubenswrapper[4884]: E1128 16:33:16.690814 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:33:27 crc kubenswrapper[4884]: I1128 16:33:27.688996 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:33:27 crc kubenswrapper[4884]: E1128 16:33:27.689954 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.264613 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lg2cx"]
Nov 28 16:33:31 crc kubenswrapper[4884]: E1128 16:33:31.265432 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e3ebe02-37c4-4e44-b874-ef4cb3189717" containerName="collect-profiles"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.265449 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e3ebe02-37c4-4e44-b874-ef4cb3189717" containerName="collect-profiles"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.265626 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e3ebe02-37c4-4e44-b874-ef4cb3189717" containerName="collect-profiles"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.266764 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.278676 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lg2cx"]
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.443082 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-utilities\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.443193 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlnms\" (UniqueName: \"kubernetes.io/projected/b7877a7d-08c2-4911-af84-9fd1e67a6052-kube-api-access-mlnms\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.443233 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-catalog-content\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.544783 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-utilities\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.544847 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlnms\" (UniqueName: \"kubernetes.io/projected/b7877a7d-08c2-4911-af84-9fd1e67a6052-kube-api-access-mlnms\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.544884 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-catalog-content\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.545497 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-catalog-content\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.545493 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-utilities\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.580311 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlnms\" (UniqueName: \"kubernetes.io/projected/b7877a7d-08c2-4911-af84-9fd1e67a6052-kube-api-access-mlnms\") pod \"certified-operators-lg2cx\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") " pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:31 crc kubenswrapper[4884]: I1128 16:33:31.585440 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:32 crc kubenswrapper[4884]: I1128 16:33:32.088788 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lg2cx"]
Nov 28 16:33:32 crc kubenswrapper[4884]: I1128 16:33:32.844959 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerID="0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f" exitCode=0
Nov 28 16:33:32 crc kubenswrapper[4884]: I1128 16:33:32.845036 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cx" event={"ID":"b7877a7d-08c2-4911-af84-9fd1e67a6052","Type":"ContainerDied","Data":"0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f"}
Nov 28 16:33:32 crc kubenswrapper[4884]: I1128 16:33:32.847957 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cx" event={"ID":"b7877a7d-08c2-4911-af84-9fd1e67a6052","Type":"ContainerStarted","Data":"f21eb706fd4c3f9b521e6cb579d059c46e22c1f8bf126304a875790ebd6ad225"}
Nov 28 16:33:34 crc kubenswrapper[4884]: I1128 16:33:34.861917 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerID="fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf" exitCode=0
Nov 28 16:33:34 crc kubenswrapper[4884]: I1128 16:33:34.861993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cx" event={"ID":"b7877a7d-08c2-4911-af84-9fd1e67a6052","Type":"ContainerDied","Data":"fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf"}
Nov 28 16:33:35 crc kubenswrapper[4884]: I1128 16:33:35.878490 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cx" event={"ID":"b7877a7d-08c2-4911-af84-9fd1e67a6052","Type":"ContainerStarted","Data":"4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f"}
Nov 28 16:33:35 crc kubenswrapper[4884]: I1128 16:33:35.906165 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lg2cx" podStartSLOduration=2.200442214 podStartE2EDuration="4.906139019s" podCreationTimestamp="2025-11-28 16:33:31 +0000 UTC" firstStartedPulling="2025-11-28 16:33:32.846414382 +0000 UTC m=+4452.409198183" lastFinishedPulling="2025-11-28 16:33:35.552111177 +0000 UTC m=+4455.114894988" observedRunningTime="2025-11-28 16:33:35.898370538 +0000 UTC m=+4455.461154359" watchObservedRunningTime="2025-11-28 16:33:35.906139019 +0000 UTC m=+4455.468922840"
Nov 28 16:33:38 crc kubenswrapper[4884]: I1128 16:33:38.688057 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:33:38 crc kubenswrapper[4884]: E1128 16:33:38.688758 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:33:41 crc kubenswrapper[4884]: I1128 16:33:41.586049 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:41 crc kubenswrapper[4884]: I1128 16:33:41.587518 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:41 crc kubenswrapper[4884]: I1128 16:33:41.639361 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:41 crc kubenswrapper[4884]: I1128 16:33:41.960747 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:42 crc kubenswrapper[4884]: I1128 16:33:42.011064 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lg2cx"]
Nov 28 16:33:43 crc kubenswrapper[4884]: I1128 16:33:43.937285 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lg2cx" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="registry-server" containerID="cri-o://4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f" gracePeriod=2
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.357248 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.432793 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-utilities\") pod \"b7877a7d-08c2-4911-af84-9fd1e67a6052\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") "
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.432993 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-catalog-content\") pod \"b7877a7d-08c2-4911-af84-9fd1e67a6052\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") "
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.433023 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlnms\" (UniqueName: \"kubernetes.io/projected/b7877a7d-08c2-4911-af84-9fd1e67a6052-kube-api-access-mlnms\") pod \"b7877a7d-08c2-4911-af84-9fd1e67a6052\" (UID: \"b7877a7d-08c2-4911-af84-9fd1e67a6052\") "
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.434070 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-utilities" (OuterVolumeSpecName: "utilities") pod "b7877a7d-08c2-4911-af84-9fd1e67a6052" (UID: "b7877a7d-08c2-4911-af84-9fd1e67a6052"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.439004 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7877a7d-08c2-4911-af84-9fd1e67a6052-kube-api-access-mlnms" (OuterVolumeSpecName: "kube-api-access-mlnms") pod "b7877a7d-08c2-4911-af84-9fd1e67a6052" (UID: "b7877a7d-08c2-4911-af84-9fd1e67a6052"). InnerVolumeSpecName "kube-api-access-mlnms". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.491972 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7877a7d-08c2-4911-af84-9fd1e67a6052" (UID: "b7877a7d-08c2-4911-af84-9fd1e67a6052"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.534127 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.534165 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlnms\" (UniqueName: \"kubernetes.io/projected/b7877a7d-08c2-4911-af84-9fd1e67a6052-kube-api-access-mlnms\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.534184 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7877a7d-08c2-4911-af84-9fd1e67a6052-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.945882 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerID="4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f" exitCode=0
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.945919 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cx" event={"ID":"b7877a7d-08c2-4911-af84-9fd1e67a6052","Type":"ContainerDied","Data":"4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f"}
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.945930 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lg2cx"
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.945943 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lg2cx" event={"ID":"b7877a7d-08c2-4911-af84-9fd1e67a6052","Type":"ContainerDied","Data":"f21eb706fd4c3f9b521e6cb579d059c46e22c1f8bf126304a875790ebd6ad225"}
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.945960 4884 scope.go:117] "RemoveContainer" containerID="4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f"
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.975196 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lg2cx"]
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.976985 4884 scope.go:117] "RemoveContainer" containerID="fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf"
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.981737 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lg2cx"]
Nov 28 16:33:44 crc kubenswrapper[4884]: I1128 16:33:44.993894 4884 scope.go:117] "RemoveContainer" containerID="0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f"
Nov 28 16:33:45 crc kubenswrapper[4884]: I1128 16:33:45.022046 4884 scope.go:117] "RemoveContainer" containerID="4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f"
Nov 28 16:33:45 crc kubenswrapper[4884]: E1128 16:33:45.022568 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f\": container with ID starting with 4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f not found: ID does not exist" containerID="4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f"
Nov 28 16:33:45 crc kubenswrapper[4884]: I1128 16:33:45.022618 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f"} err="failed to get container status \"4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f\": rpc error: code = NotFound desc = could not find container \"4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f\": container with ID starting with 4032c0c8b3c814e656e25b377d3b5b694c15011f73e5cad2cc26f8e692adba7f not found: ID does not exist"
Nov 28 16:33:45 crc kubenswrapper[4884]: I1128 16:33:45.022649 4884 scope.go:117] "RemoveContainer" containerID="fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf"
Nov 28 16:33:45 crc kubenswrapper[4884]: E1128 16:33:45.023308 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf\": container with ID starting with fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf not found: ID does not exist" containerID="fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf"
Nov 28 16:33:45 crc kubenswrapper[4884]: I1128 16:33:45.023343 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf"} err="failed to get container status \"fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf\": rpc error: code = NotFound desc = could not find container \"fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf\": container with ID starting with fddced69e879b4d18c778414329fb6f5792fed2f694679129dc23c1115159ebf not found: ID does not exist"
Nov 28 16:33:45 crc kubenswrapper[4884]: I1128 16:33:45.023363 4884 scope.go:117] "RemoveContainer" containerID="0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f"
Nov 28 16:33:45 crc kubenswrapper[4884]: E1128 16:33:45.023683 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f\": container with ID starting with 0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f not found: ID does not exist" containerID="0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f"
Nov 28 16:33:45 crc kubenswrapper[4884]: I1128 16:33:45.023741 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f"} err="failed to get container status \"0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f\": rpc error: code = NotFound desc = could not find container \"0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f\": container with ID starting with 0ac2c365c37ef4bc1b453063caecd2c3ad954aea2a57dda2ed6952028d372d3f not found: ID does not exist"
Nov 28 16:33:46 crc kubenswrapper[4884]: I1128 16:33:46.699523 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" path="/var/lib/kubelet/pods/b7877a7d-08c2-4911-af84-9fd1e67a6052/volumes"
Nov 28 16:33:53 crc kubenswrapper[4884]: I1128 16:33:53.688887 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:33:53 crc kubenswrapper[4884]: E1128 16:33:53.689726 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.076481 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tmwqt"]
Nov 28 16:33:55 crc kubenswrapper[4884]: E1128 16:33:55.078018 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="extract-content"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.078147 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="extract-content"
Nov 28 16:33:55 crc kubenswrapper[4884]: E1128 16:33:55.078282 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="registry-server"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.078374 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="registry-server"
Nov 28 16:33:55 crc kubenswrapper[4884]: E1128 16:33:55.078456 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="extract-utilities"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.078536 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="extract-utilities"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.078739 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7877a7d-08c2-4911-af84-9fd1e67a6052" containerName="registry-server"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.080070 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.091094 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tmwqt"]
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.173806 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wsbv\" (UniqueName: \"kubernetes.io/projected/f513c278-4fc1-4972-aa28-3a94e2f1279d-kube-api-access-9wsbv\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.174176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-catalog-content\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.174304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-utilities\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.275876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-catalog-content\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.276223 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-utilities\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.276379 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wsbv\" (UniqueName: \"kubernetes.io/projected/f513c278-4fc1-4972-aa28-3a94e2f1279d-kube-api-access-9wsbv\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.276411 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-catalog-content\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.276728 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-utilities\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.296980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wsbv\" (UniqueName: \"kubernetes.io/projected/f513c278-4fc1-4972-aa28-3a94e2f1279d-kube-api-access-9wsbv\") pod \"redhat-marketplace-tmwqt\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") " pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.402424 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:33:55 crc kubenswrapper[4884]: I1128 16:33:55.900372 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tmwqt"]
Nov 28 16:33:56 crc kubenswrapper[4884]: I1128 16:33:56.029433 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tmwqt" event={"ID":"f513c278-4fc1-4972-aa28-3a94e2f1279d","Type":"ContainerStarted","Data":"1152af63a7c392c835b269cd31aeecac474bee55fc00f7b4097e50665e70f72b"}
Nov 28 16:33:57 crc kubenswrapper[4884]: I1128 16:33:57.038938 4884 generic.go:334] "Generic (PLEG): container finished" podID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerID="4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd" exitCode=0
Nov 28 16:33:57 crc kubenswrapper[4884]: I1128 16:33:57.039050 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tmwqt" event={"ID":"f513c278-4fc1-4972-aa28-3a94e2f1279d","Type":"ContainerDied","Data":"4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd"}
Nov 28 16:33:57 crc kubenswrapper[4884]: I1128 16:33:57.040868 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:34:00 crc kubenswrapper[4884]: I1128 16:34:00.073406 4884 generic.go:334] "Generic (PLEG): container finished" podID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerID="45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7" exitCode=0
Nov 28 16:34:00 crc kubenswrapper[4884]: I1128 16:34:00.074817 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tmwqt" event={"ID":"f513c278-4fc1-4972-aa28-3a94e2f1279d","Type":"ContainerDied","Data":"45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7"}
Nov 28 16:34:02 crc kubenswrapper[4884]: I1128 16:34:02.088596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tmwqt" event={"ID":"f513c278-4fc1-4972-aa28-3a94e2f1279d","Type":"ContainerStarted","Data":"ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0"}
Nov 28 16:34:02 crc kubenswrapper[4884]: I1128 16:34:02.107235 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tmwqt" podStartSLOduration=2.857305417 podStartE2EDuration="7.107214862s" podCreationTimestamp="2025-11-28 16:33:55 +0000 UTC" firstStartedPulling="2025-11-28 16:33:57.040603669 +0000 UTC m=+4476.603387470" lastFinishedPulling="2025-11-28 16:34:01.290513104 +0000 UTC m=+4480.853296915" observedRunningTime="2025-11-28 16:34:02.10138639 +0000 UTC m=+4481.664170181" watchObservedRunningTime="2025-11-28 16:34:02.107214862 +0000 UTC m=+4481.669998673"
Nov 28 16:34:05 crc kubenswrapper[4884]: I1128 16:34:05.403569 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:34:05 crc kubenswrapper[4884]: I1128 16:34:05.405118 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:34:05 crc kubenswrapper[4884]: I1128 16:34:05.447333 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:34:05 crc kubenswrapper[4884]: I1128 16:34:05.688211 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:34:05 crc kubenswrapper[4884]: E1128 16:34:05.688499 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:34:06 crc kubenswrapper[4884]: I1128 16:34:06.155481 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:34:06 crc kubenswrapper[4884]: I1128 16:34:06.200889 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tmwqt"]
Nov 28 16:34:08 crc kubenswrapper[4884]: I1128 16:34:08.130106 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tmwqt" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="registry-server" containerID="cri-o://ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0" gracePeriod=2
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.014244 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.069164 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-utilities\") pod \"f513c278-4fc1-4972-aa28-3a94e2f1279d\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") "
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.069381 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-catalog-content\") pod \"f513c278-4fc1-4972-aa28-3a94e2f1279d\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") "
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.069416 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wsbv\" (UniqueName: \"kubernetes.io/projected/f513c278-4fc1-4972-aa28-3a94e2f1279d-kube-api-access-9wsbv\") pod \"f513c278-4fc1-4972-aa28-3a94e2f1279d\" (UID: \"f513c278-4fc1-4972-aa28-3a94e2f1279d\") "
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.070517 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-utilities" (OuterVolumeSpecName: "utilities") pod "f513c278-4fc1-4972-aa28-3a94e2f1279d" (UID: "f513c278-4fc1-4972-aa28-3a94e2f1279d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.074626 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f513c278-4fc1-4972-aa28-3a94e2f1279d-kube-api-access-9wsbv" (OuterVolumeSpecName: "kube-api-access-9wsbv") pod "f513c278-4fc1-4972-aa28-3a94e2f1279d" (UID: "f513c278-4fc1-4972-aa28-3a94e2f1279d"). InnerVolumeSpecName "kube-api-access-9wsbv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.096771 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f513c278-4fc1-4972-aa28-3a94e2f1279d" (UID: "f513c278-4fc1-4972-aa28-3a94e2f1279d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.139518 4884 generic.go:334] "Generic (PLEG): container finished" podID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerID="ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0" exitCode=0
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.139575 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tmwqt" event={"ID":"f513c278-4fc1-4972-aa28-3a94e2f1279d","Type":"ContainerDied","Data":"ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0"}
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.139601 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tmwqt" event={"ID":"f513c278-4fc1-4972-aa28-3a94e2f1279d","Type":"ContainerDied","Data":"1152af63a7c392c835b269cd31aeecac474bee55fc00f7b4097e50665e70f72b"}
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.139635 4884 scope.go:117] "RemoveContainer" containerID="ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.139787 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tmwqt"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.161673 4884 scope.go:117] "RemoveContainer" containerID="45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.174191 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.174219 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wsbv\" (UniqueName: \"kubernetes.io/projected/f513c278-4fc1-4972-aa28-3a94e2f1279d-kube-api-access-9wsbv\") on node \"crc\" DevicePath \"\""
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.174228 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f513c278-4fc1-4972-aa28-3a94e2f1279d-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.178347 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tmwqt"]
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.186247 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tmwqt"]
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.193624 4884 scope.go:117] "RemoveContainer" containerID="4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.208893 4884 scope.go:117] "RemoveContainer" containerID="ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0"
Nov 28 16:34:09 crc kubenswrapper[4884]: E1128 16:34:09.209383 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0\": container with ID starting with ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0 not found: ID does not exist" containerID="ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.209415 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0"} err="failed to get container status \"ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0\": rpc error: code = NotFound desc = could not find container \"ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0\": container with ID starting with ed6409d08c7d55b865fc51ee4ff35b9217e252c0f468befa34069c82afb01ab0 not found: ID does not exist"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.209442 4884 scope.go:117] "RemoveContainer" containerID="45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7"
Nov 28 16:34:09 crc kubenswrapper[4884]: E1128 16:34:09.209709 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7\": container with ID starting with 45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7 not found: ID does not exist" containerID="45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.209941 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7"} err="failed to get container status \"45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7\": rpc error: code = NotFound desc = could not find container \"45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7\": container with ID starting with 45b0527c643e0e428e3bcae03098bdcb3491b56522d3d1c421ea14627d0630c7 not found: ID does not exist"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.209958 4884 scope.go:117] "RemoveContainer" containerID="4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd"
Nov 28 16:34:09 crc kubenswrapper[4884]: E1128 16:34:09.210455 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd\": container with ID starting with 4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd not found: ID does not exist" containerID="4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd"
Nov 28 16:34:09 crc kubenswrapper[4884]: I1128 16:34:09.210499 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd"} err="failed to get container status \"4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd\": rpc error: code = NotFound desc = could not find container \"4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd\": container with ID starting with 4e0091b9b092d549b605caa01e198799062907208808e63b84ec871d697e57dd not found: ID does not exist"
Nov 28 16:34:10 crc kubenswrapper[4884]: I1128 16:34:10.696996 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" path="/var/lib/kubelet/pods/f513c278-4fc1-4972-aa28-3a94e2f1279d/volumes"
Nov 28 16:34:18 crc kubenswrapper[4884]: I1128 16:34:18.689468 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:34:18 crc kubenswrapper[4884]: E1128 16:34:18.690787 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:34:29 crc kubenswrapper[4884]: I1128 16:34:29.688070 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:34:29 crc kubenswrapper[4884]: E1128 16:34:29.689001 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:34:42 crc kubenswrapper[4884]: I1128 16:34:42.688242 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:34:42 crc kubenswrapper[4884]: E1128 16:34:42.689075 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:34:54 crc kubenswrapper[4884]: I1128 16:34:54.689377 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:34:54 crc kubenswrapper[4884]: E1128 16:34:54.690743 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:35:05 crc kubenswrapper[4884]: I1128 16:35:05.687860 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:35:05 crc kubenswrapper[4884]: E1128 16:35:05.688608 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:35:16 crc kubenswrapper[4884]: I1128 16:35:16.688288 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549"
Nov 28 16:35:16 crc kubenswrapper[4884]: E1128 16:35:16.689054 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:35:27 crc kubenswrapper[4884]: I1128 16:35:27.688502 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:35:27 crc kubenswrapper[4884]: E1128 16:35:27.689256 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:35:40 crc kubenswrapper[4884]: I1128 16:35:40.693966 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:35:40 crc kubenswrapper[4884]: E1128 16:35:40.694843 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:35:51 crc kubenswrapper[4884]: I1128 16:35:51.688264 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:35:51 crc kubenswrapper[4884]: E1128 16:35:51.689168 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:36:03 crc kubenswrapper[4884]: I1128 16:36:03.688460 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:36:03 crc kubenswrapper[4884]: E1128 16:36:03.689244 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:36:18 crc kubenswrapper[4884]: I1128 16:36:18.688639 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:36:18 crc kubenswrapper[4884]: E1128 16:36:18.689587 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:36:32 crc kubenswrapper[4884]: I1128 16:36:32.688814 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:36:32 crc kubenswrapper[4884]: E1128 16:36:32.689756 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:36:43 crc kubenswrapper[4884]: I1128 16:36:43.688348 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:36:43 crc kubenswrapper[4884]: E1128 16:36:43.689622 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:36:55 crc kubenswrapper[4884]: I1128 16:36:55.689808 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:36:55 crc kubenswrapper[4884]: E1128 16:36:55.691114 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:37:06 crc kubenswrapper[4884]: I1128 16:37:06.689282 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:37:06 crc kubenswrapper[4884]: E1128 16:37:06.690156 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:37:21 crc kubenswrapper[4884]: I1128 16:37:21.688428 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:37:21 crc kubenswrapper[4884]: E1128 16:37:21.690436 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.297599 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-cfmfz"] Nov 28 16:37:27 crc kubenswrapper[4884]: E1128 16:37:27.298418 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="registry-server" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.298430 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="registry-server" Nov 28 16:37:27 crc kubenswrapper[4884]: E1128 16:37:27.298446 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="extract-utilities" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.298452 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="extract-utilities" Nov 28 16:37:27 crc kubenswrapper[4884]: E1128 16:37:27.298469 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="extract-content" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.298474 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="extract-content" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.298642 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f513c278-4fc1-4972-aa28-3a94e2f1279d" containerName="registry-server" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.299945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.318372 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cfmfz"] Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.411698 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-catalog-content\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.411757 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-utilities\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.411852 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npfps\" (UniqueName: \"kubernetes.io/projected/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-kube-api-access-npfps\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.513868 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-catalog-content\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.513942 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-utilities\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.513996 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npfps\" (UniqueName: \"kubernetes.io/projected/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-kube-api-access-npfps\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.514511 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-catalog-content\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.514551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-utilities\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.532896 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npfps\" (UniqueName: \"kubernetes.io/projected/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-kube-api-access-npfps\") pod \"community-operators-cfmfz\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:27 crc kubenswrapper[4884]: I1128 16:37:27.717773 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:28 crc kubenswrapper[4884]: I1128 16:37:28.170533 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cfmfz"] Nov 28 16:37:28 crc kubenswrapper[4884]: I1128 16:37:28.556934 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerID="2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524" exitCode=0 Nov 28 16:37:28 crc kubenswrapper[4884]: I1128 16:37:28.556988 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmfz" event={"ID":"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b","Type":"ContainerDied","Data":"2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524"} Nov 28 16:37:28 crc kubenswrapper[4884]: I1128 16:37:28.557018 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmfz" event={"ID":"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b","Type":"ContainerStarted","Data":"c52d9e917e590265f1ed35adc6c26109b82fd06c9fb0866b7a4d0c97a8092757"} Nov 28 16:37:30 crc kubenswrapper[4884]: I1128 16:37:30.575362 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerID="b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9" exitCode=0 Nov 28 16:37:30 crc kubenswrapper[4884]: I1128 16:37:30.575479 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmfz" event={"ID":"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b","Type":"ContainerDied","Data":"b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9"} Nov 28 16:37:31 crc kubenswrapper[4884]: I1128 16:37:31.584339 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmfz" event={"ID":"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b","Type":"ContainerStarted","Data":"b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e"} Nov 28 16:37:31 crc kubenswrapper[4884]: I1128 16:37:31.607770 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cfmfz" podStartSLOduration=2.083812806 podStartE2EDuration="4.607753573s" podCreationTimestamp="2025-11-28 16:37:27 +0000 UTC" firstStartedPulling="2025-11-28 16:37:28.558526064 +0000 UTC m=+4688.121309865" lastFinishedPulling="2025-11-28 16:37:31.082466831 +0000 UTC m=+4690.645250632" observedRunningTime="2025-11-28 16:37:31.602524056 +0000 UTC m=+4691.165307857" watchObservedRunningTime="2025-11-28 16:37:31.607753573 +0000 UTC m=+4691.170537374" Nov 28 16:37:32 crc kubenswrapper[4884]: I1128 16:37:32.688831 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:37:32 crc kubenswrapper[4884]: E1128 16:37:32.689532 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:37:37 crc kubenswrapper[4884]: I1128 16:37:37.718880 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 
16:37:37 crc kubenswrapper[4884]: I1128 16:37:37.719438 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:37 crc kubenswrapper[4884]: I1128 16:37:37.767190 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:38 crc kubenswrapper[4884]: I1128 16:37:38.676106 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:38 crc kubenswrapper[4884]: I1128 16:37:38.722280 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cfmfz"] Nov 28 16:37:40 crc kubenswrapper[4884]: I1128 16:37:40.645908 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cfmfz" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="registry-server" containerID="cri-o://b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e" gracePeriod=2 Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.513918 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.535548 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-catalog-content\") pod \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.535658 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npfps\" (UniqueName: \"kubernetes.io/projected/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-kube-api-access-npfps\") pod \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.535714 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-utilities\") pod \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\" (UID: \"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b\") " Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.536957 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-utilities" (OuterVolumeSpecName: "utilities") pod "fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" (UID: "fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.543200 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-kube-api-access-npfps" (OuterVolumeSpecName: "kube-api-access-npfps") pod "fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" (UID: "fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b"). InnerVolumeSpecName "kube-api-access-npfps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.589246 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" (UID: "fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.636619 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npfps\" (UniqueName: \"kubernetes.io/projected/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-kube-api-access-npfps\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.636659 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.636668 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.657390 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerID="b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e" exitCode=0 Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.657433 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmfz" event={"ID":"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b","Type":"ContainerDied","Data":"b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e"} Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.657485 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cfmfz" event={"ID":"fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b","Type":"ContainerDied","Data":"c52d9e917e590265f1ed35adc6c26109b82fd06c9fb0866b7a4d0c97a8092757"} Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.657508 4884 scope.go:117] "RemoveContainer" containerID="b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.657528 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cfmfz" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.682709 4884 scope.go:117] "RemoveContainer" containerID="b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.706294 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cfmfz"] Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.715712 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cfmfz"] Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.723713 4884 scope.go:117] "RemoveContainer" containerID="2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.746995 4884 scope.go:117] "RemoveContainer" containerID="b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e" Nov 28 16:37:41 crc kubenswrapper[4884]: E1128 16:37:41.747649 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e\": container with ID starting with b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e not found: ID does not exist" containerID="b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.747703 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e"} err="failed to get container status \"b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e\": rpc error: code = NotFound desc = could not find container \"b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e\": container with ID starting with b0957f65eb42a844aca14e56ce8fc2263a24626e598146468f6df56e1d730d3e not found: ID does not exist" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.747737 4884 scope.go:117] "RemoveContainer" containerID="b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9" Nov 28 16:37:41 crc kubenswrapper[4884]: E1128 16:37:41.748025 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9\": container with ID starting with b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9 not found: ID does not exist" containerID="b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.748053 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9"} err="failed to get container status \"b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9\": rpc error: code = NotFound desc = could not find container \"b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9\": container with ID starting with b80e3e9cbde92a334d8aeefbb00b42219ef0814241f7c5842127d55b6803a5a9 not found: ID does not exist" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.748071 4884 scope.go:117] "RemoveContainer" containerID="2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524" Nov 28 16:37:41 crc kubenswrapper[4884]: E1128 16:37:41.748492 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524\": container with ID starting with 2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524 not found: ID does not exist" containerID="2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524" Nov 28 16:37:41 crc kubenswrapper[4884]: I1128 16:37:41.748517 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524"} err="failed to get container status \"2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524\": rpc error: code = NotFound desc = could not find container \"2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524\": container with ID starting with 2306ee59ff8499ffeaf2345d12e92564112b264f14f97fe73a50a18edaa54524 not found: ID does not exist" Nov 28 16:37:42 crc kubenswrapper[4884]: I1128 16:37:42.699255 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" path="/var/lib/kubelet/pods/fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b/volumes" Nov 28 16:37:43 crc kubenswrapper[4884]: I1128 16:37:43.688920 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:37:43 crc kubenswrapper[4884]: E1128 16:37:43.689323 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.645052 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-nggv8"] Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.650392 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-nggv8"] Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.766443 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-t68tp"] Nov 28 16:37:47 crc kubenswrapper[4884]: E1128 16:37:47.766727 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="extract-content" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.766744 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="extract-content" Nov 28 16:37:47 crc kubenswrapper[4884]: E1128 16:37:47.766762 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="registry-server" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.766769 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="registry-server" Nov 28 16:37:47 crc kubenswrapper[4884]: E1128 16:37:47.766785 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="extract-utilities" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.766792 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="extract-utilities" Nov 28 16:37:47 crc 
kubenswrapper[4884]: I1128 16:37:47.766930 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa078ac7-5ad2-4ddb-a483-a63fcfb7f84b" containerName="registry-server" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.767390 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.769601 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.769631 4884 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-ptzd4" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.769601 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.769662 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.783147 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-t68tp"] Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.924784 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4mf9\" (UniqueName: \"kubernetes.io/projected/3ad344c1-b2ed-4068-9efc-04376d256a80-kube-api-access-p4mf9\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.925196 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3ad344c1-b2ed-4068-9efc-04376d256a80-node-mnt\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:47 crc kubenswrapper[4884]: I1128 16:37:47.925423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3ad344c1-b2ed-4068-9efc-04376d256a80-crc-storage\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.026901 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3ad344c1-b2ed-4068-9efc-04376d256a80-crc-storage\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.027036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4mf9\" (UniqueName: \"kubernetes.io/projected/3ad344c1-b2ed-4068-9efc-04376d256a80-kube-api-access-p4mf9\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.027186 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3ad344c1-b2ed-4068-9efc-04376d256a80-node-mnt\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 
16:37:48.027544 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3ad344c1-b2ed-4068-9efc-04376d256a80-node-mnt\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.027803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3ad344c1-b2ed-4068-9efc-04376d256a80-crc-storage\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.053789 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4mf9\" (UniqueName: \"kubernetes.io/projected/3ad344c1-b2ed-4068-9efc-04376d256a80-kube-api-access-p4mf9\") pod \"crc-storage-crc-t68tp\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.092822 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.524037 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-t68tp"] Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.700009 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d0ecad9-0e8e-4826-b39b-97452fe16dfe" path="/var/lib/kubelet/pods/5d0ecad9-0e8e-4826-b39b-97452fe16dfe/volumes" Nov 28 16:37:48 crc kubenswrapper[4884]: I1128 16:37:48.709153 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t68tp" event={"ID":"3ad344c1-b2ed-4068-9efc-04376d256a80","Type":"ContainerStarted","Data":"8c4dfa33763d88e94bf1f36548a5c1d5782545c2a34ba4276db4f606d9c8cc4d"} Nov 28 16:37:49 crc kubenswrapper[4884]: I1128 16:37:49.716920 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t68tp" event={"ID":"3ad344c1-b2ed-4068-9efc-04376d256a80","Type":"ContainerStarted","Data":"2ce63d25c8f5fce9d965f4d02571a22e8dfc773a4d721dbbca0948f9e85cc06a"} Nov 28 16:37:49 crc kubenswrapper[4884]: I1128 16:37:49.735826 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="crc-storage/crc-storage-crc-t68tp" podStartSLOduration=1.852985131 podStartE2EDuration="2.735804171s" podCreationTimestamp="2025-11-28 16:37:47 +0000 UTC" firstStartedPulling="2025-11-28 16:37:48.52306976 +0000 UTC m=+4708.085853561" lastFinishedPulling="2025-11-28 16:37:49.4058888 +0000 UTC m=+4708.968672601" observedRunningTime="2025-11-28 16:37:49.729988838 +0000 UTC m=+4709.292772639" watchObservedRunningTime="2025-11-28 16:37:49.735804171 +0000 UTC m=+4709.298587982" Nov 28 16:37:50 crc kubenswrapper[4884]: I1128 16:37:50.728633 4884 generic.go:334] "Generic (PLEG): container finished" podID="3ad344c1-b2ed-4068-9efc-04376d256a80" containerID="2ce63d25c8f5fce9d965f4d02571a22e8dfc773a4d721dbbca0948f9e85cc06a" exitCode=0 Nov 28 16:37:50 crc kubenswrapper[4884]: I1128 16:37:50.728697 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t68tp" event={"ID":"3ad344c1-b2ed-4068-9efc-04376d256a80","Type":"ContainerDied","Data":"2ce63d25c8f5fce9d965f4d02571a22e8dfc773a4d721dbbca0948f9e85cc06a"} Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.012385 4884 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.188169 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3ad344c1-b2ed-4068-9efc-04376d256a80-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "3ad344c1-b2ed-4068-9efc-04376d256a80" (UID: "3ad344c1-b2ed-4068-9efc-04376d256a80"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.188177 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3ad344c1-b2ed-4068-9efc-04376d256a80-node-mnt\") pod \"3ad344c1-b2ed-4068-9efc-04376d256a80\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.188659 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3ad344c1-b2ed-4068-9efc-04376d256a80-crc-storage\") pod \"3ad344c1-b2ed-4068-9efc-04376d256a80\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.188772 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4mf9\" (UniqueName: \"kubernetes.io/projected/3ad344c1-b2ed-4068-9efc-04376d256a80-kube-api-access-p4mf9\") pod \"3ad344c1-b2ed-4068-9efc-04376d256a80\" (UID: \"3ad344c1-b2ed-4068-9efc-04376d256a80\") " Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.189840 4884 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3ad344c1-b2ed-4068-9efc-04376d256a80-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.194999 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad344c1-b2ed-4068-9efc-04376d256a80-kube-api-access-p4mf9" (OuterVolumeSpecName: "kube-api-access-p4mf9") pod "3ad344c1-b2ed-4068-9efc-04376d256a80" (UID: "3ad344c1-b2ed-4068-9efc-04376d256a80"). InnerVolumeSpecName "kube-api-access-p4mf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.208737 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ad344c1-b2ed-4068-9efc-04376d256a80-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "3ad344c1-b2ed-4068-9efc-04376d256a80" (UID: "3ad344c1-b2ed-4068-9efc-04376d256a80"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.291024 4884 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3ad344c1-b2ed-4068-9efc-04376d256a80-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.291061 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4mf9\" (UniqueName: \"kubernetes.io/projected/3ad344c1-b2ed-4068-9efc-04376d256a80-kube-api-access-p4mf9\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.743041 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-t68tp" event={"ID":"3ad344c1-b2ed-4068-9efc-04376d256a80","Type":"ContainerDied","Data":"8c4dfa33763d88e94bf1f36548a5c1d5782545c2a34ba4276db4f606d9c8cc4d"} Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.743082 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c4dfa33763d88e94bf1f36548a5c1d5782545c2a34ba4276db4f606d9c8cc4d" Nov 28 16:37:52 crc kubenswrapper[4884]: I1128 16:37:52.743134 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-t68tp" Nov 28 16:37:53 crc kubenswrapper[4884]: I1128 16:37:53.891237 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-t68tp"] Nov 28 16:37:53 crc kubenswrapper[4884]: I1128 16:37:53.897792 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-t68tp"] Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.029233 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-lpk94"] Nov 28 16:37:54 crc kubenswrapper[4884]: E1128 16:37:54.029521 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad344c1-b2ed-4068-9efc-04376d256a80" containerName="storage" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.029534 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad344c1-b2ed-4068-9efc-04376d256a80" containerName="storage" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.029679 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad344c1-b2ed-4068-9efc-04376d256a80" containerName="storage" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.030140 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.033361 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.033676 4884 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-ptzd4" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.033873 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.037825 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.041955 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-lpk94"] Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.218867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/593aea57-09af-4a56-bb26-935aa3168ab1-node-mnt\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.218939 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/593aea57-09af-4a56-bb26-935aa3168ab1-crc-storage\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.219330 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9kn9\" (UniqueName: \"kubernetes.io/projected/593aea57-09af-4a56-bb26-935aa3168ab1-kube-api-access-m9kn9\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.321020 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/593aea57-09af-4a56-bb26-935aa3168ab1-node-mnt\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.321222 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/593aea57-09af-4a56-bb26-935aa3168ab1-crc-storage\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.321365 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9kn9\" (UniqueName: \"kubernetes.io/projected/593aea57-09af-4a56-bb26-935aa3168ab1-kube-api-access-m9kn9\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.321486 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/593aea57-09af-4a56-bb26-935aa3168ab1-node-mnt\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " 
pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.321979 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/593aea57-09af-4a56-bb26-935aa3168ab1-crc-storage\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.341322 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9kn9\" (UniqueName: \"kubernetes.io/projected/593aea57-09af-4a56-bb26-935aa3168ab1-kube-api-access-m9kn9\") pod \"crc-storage-crc-lpk94\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.353732 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.698611 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ad344c1-b2ed-4068-9efc-04376d256a80" path="/var/lib/kubelet/pods/3ad344c1-b2ed-4068-9efc-04376d256a80/volumes" Nov 28 16:37:54 crc kubenswrapper[4884]: I1128 16:37:54.766641 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-lpk94"] Nov 28 16:37:55 crc kubenswrapper[4884]: I1128 16:37:55.770016 4884 generic.go:334] "Generic (PLEG): container finished" podID="593aea57-09af-4a56-bb26-935aa3168ab1" containerID="c148a9f5a1ed8ac0ac41215aba3810b7b49d06493eb0354659bd245f900ae1c1" exitCode=0 Nov 28 16:37:55 crc kubenswrapper[4884]: I1128 16:37:55.770150 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-lpk94" event={"ID":"593aea57-09af-4a56-bb26-935aa3168ab1","Type":"ContainerDied","Data":"c148a9f5a1ed8ac0ac41215aba3810b7b49d06493eb0354659bd245f900ae1c1"} Nov 28 16:37:55 crc kubenswrapper[4884]: I1128 16:37:55.770319 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-lpk94" event={"ID":"593aea57-09af-4a56-bb26-935aa3168ab1","Type":"ContainerStarted","Data":"1b13331117f32d9e7f137a9e6c1c7befc755ad13af5ef2a3b1ddc280ab202888"} Nov 28 16:37:56 crc kubenswrapper[4884]: I1128 16:37:56.688933 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.058610 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.160466 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/593aea57-09af-4a56-bb26-935aa3168ab1-node-mnt\") pod \"593aea57-09af-4a56-bb26-935aa3168ab1\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.160559 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/593aea57-09af-4a56-bb26-935aa3168ab1-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "593aea57-09af-4a56-bb26-935aa3168ab1" (UID: "593aea57-09af-4a56-bb26-935aa3168ab1"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.160832 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/593aea57-09af-4a56-bb26-935aa3168ab1-crc-storage\") pod \"593aea57-09af-4a56-bb26-935aa3168ab1\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.160981 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9kn9\" (UniqueName: \"kubernetes.io/projected/593aea57-09af-4a56-bb26-935aa3168ab1-kube-api-access-m9kn9\") pod \"593aea57-09af-4a56-bb26-935aa3168ab1\" (UID: \"593aea57-09af-4a56-bb26-935aa3168ab1\") " Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.161498 4884 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/593aea57-09af-4a56-bb26-935aa3168ab1-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.166871 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/593aea57-09af-4a56-bb26-935aa3168ab1-kube-api-access-m9kn9" (OuterVolumeSpecName: "kube-api-access-m9kn9") pod "593aea57-09af-4a56-bb26-935aa3168ab1" (UID: "593aea57-09af-4a56-bb26-935aa3168ab1"). InnerVolumeSpecName "kube-api-access-m9kn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.179598 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/593aea57-09af-4a56-bb26-935aa3168ab1-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "593aea57-09af-4a56-bb26-935aa3168ab1" (UID: "593aea57-09af-4a56-bb26-935aa3168ab1"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.262477 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9kn9\" (UniqueName: \"kubernetes.io/projected/593aea57-09af-4a56-bb26-935aa3168ab1-kube-api-access-m9kn9\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.262511 4884 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/593aea57-09af-4a56-bb26-935aa3168ab1-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.790135 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"1d8ad494f9b340b7f3c7e5e1e5e1e5813f7995968d1a726033a4d884c453dacf"} Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.792075 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-lpk94" event={"ID":"593aea57-09af-4a56-bb26-935aa3168ab1","Type":"ContainerDied","Data":"1b13331117f32d9e7f137a9e6c1c7befc755ad13af5ef2a3b1ddc280ab202888"} Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.792116 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b13331117f32d9e7f137a9e6c1c7befc755ad13af5ef2a3b1ddc280ab202888" Nov 28 16:37:57 crc kubenswrapper[4884]: I1128 16:37:57.792176 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-lpk94" Nov 28 16:38:28 crc kubenswrapper[4884]: I1128 16:38:28.973906 4884 scope.go:117] "RemoveContainer" containerID="9821f0fddf719c8d9786a0fb5f9613a35e429999f62b67b81d57d7708ea679e7" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.609434 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9b8v5"] Nov 28 16:39:49 crc kubenswrapper[4884]: E1128 16:39:49.610304 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="593aea57-09af-4a56-bb26-935aa3168ab1" containerName="storage" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.610320 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="593aea57-09af-4a56-bb26-935aa3168ab1" containerName="storage" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.610506 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="593aea57-09af-4a56-bb26-935aa3168ab1" containerName="storage" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.611601 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.631310 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9b8v5"] Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.715510 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7rtx\" (UniqueName: \"kubernetes.io/projected/70f04c02-05f6-45dc-9619-1349bb377b5f-kube-api-access-n7rtx\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.715554 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-catalog-content\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.715653 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-utilities\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.817410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7rtx\" (UniqueName: \"kubernetes.io/projected/70f04c02-05f6-45dc-9619-1349bb377b5f-kube-api-access-n7rtx\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.817468 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-catalog-content\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.817534 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-utilities\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.818339 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-catalog-content\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.818494 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-utilities\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.836812 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7rtx\" (UniqueName: \"kubernetes.io/projected/70f04c02-05f6-45dc-9619-1349bb377b5f-kube-api-access-n7rtx\") pod \"redhat-operators-9b8v5\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:49 crc kubenswrapper[4884]: I1128 16:39:49.978807 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:50 crc kubenswrapper[4884]: I1128 16:39:50.196977 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9b8v5"] Nov 28 16:39:50 crc kubenswrapper[4884]: I1128 16:39:50.630782 4884 generic.go:334] "Generic (PLEG): container finished" podID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerID="c2fbbe3b907bb57d204801837874d00f134517f467600d70c832b5c576be0f86" exitCode=0 Nov 28 16:39:50 crc kubenswrapper[4884]: I1128 16:39:50.630839 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerDied","Data":"c2fbbe3b907bb57d204801837874d00f134517f467600d70c832b5c576be0f86"} Nov 28 16:39:50 crc kubenswrapper[4884]: I1128 16:39:50.632232 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerStarted","Data":"7f55192abd0b5aa8a08cf7cf543936986a4b604dcf434f5efbbb71d4263393d8"} Nov 28 16:39:50 crc kubenswrapper[4884]: I1128 16:39:50.632853 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:39:51 crc kubenswrapper[4884]: I1128 16:39:51.639758 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerStarted","Data":"acaa5aba170ae0509660149bccc98a9888a56df7de327bbaaf3ff717c5672bda"} Nov 28 16:39:52 crc kubenswrapper[4884]: I1128 16:39:52.652167 4884 generic.go:334] "Generic (PLEG): container finished" podID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerID="acaa5aba170ae0509660149bccc98a9888a56df7de327bbaaf3ff717c5672bda" exitCode=0 Nov 28 16:39:52 crc kubenswrapper[4884]: I1128 16:39:52.652570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" 
event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerDied","Data":"acaa5aba170ae0509660149bccc98a9888a56df7de327bbaaf3ff717c5672bda"} Nov 28 16:39:53 crc kubenswrapper[4884]: I1128 16:39:53.662226 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerStarted","Data":"d965970c4f2577c527b1120964fc9ecda01398218e54b252ec8e0bc344a76a0c"} Nov 28 16:39:53 crc kubenswrapper[4884]: I1128 16:39:53.685958 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9b8v5" podStartSLOduration=2.03942572 podStartE2EDuration="4.685940634s" podCreationTimestamp="2025-11-28 16:39:49 +0000 UTC" firstStartedPulling="2025-11-28 16:39:50.632564172 +0000 UTC m=+4830.195347973" lastFinishedPulling="2025-11-28 16:39:53.279079086 +0000 UTC m=+4832.841862887" observedRunningTime="2025-11-28 16:39:53.680395848 +0000 UTC m=+4833.243179649" watchObservedRunningTime="2025-11-28 16:39:53.685940634 +0000 UTC m=+4833.248724435" Nov 28 16:39:59 crc kubenswrapper[4884]: I1128 16:39:59.979759 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:39:59 crc kubenswrapper[4884]: I1128 16:39:59.980911 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:40:00 crc kubenswrapper[4884]: I1128 16:40:00.030038 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:40:00 crc kubenswrapper[4884]: I1128 16:40:00.768213 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:40:00 crc kubenswrapper[4884]: I1128 16:40:00.816160 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9b8v5"] Nov 28 16:40:02 crc kubenswrapper[4884]: I1128 16:40:02.726304 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9b8v5" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="registry-server" containerID="cri-o://d965970c4f2577c527b1120964fc9ecda01398218e54b252ec8e0bc344a76a0c" gracePeriod=2 Nov 28 16:40:04 crc kubenswrapper[4884]: I1128 16:40:04.746066 4884 generic.go:334] "Generic (PLEG): container finished" podID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerID="d965970c4f2577c527b1120964fc9ecda01398218e54b252ec8e0bc344a76a0c" exitCode=0 Nov 28 16:40:04 crc kubenswrapper[4884]: I1128 16:40:04.746114 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerDied","Data":"d965970c4f2577c527b1120964fc9ecda01398218e54b252ec8e0bc344a76a0c"} Nov 28 16:40:04 crc kubenswrapper[4884]: I1128 16:40:04.925290 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.024198 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7rtx\" (UniqueName: \"kubernetes.io/projected/70f04c02-05f6-45dc-9619-1349bb377b5f-kube-api-access-n7rtx\") pod \"70f04c02-05f6-45dc-9619-1349bb377b5f\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.024264 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-catalog-content\") pod \"70f04c02-05f6-45dc-9619-1349bb377b5f\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.024455 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-utilities\") pod \"70f04c02-05f6-45dc-9619-1349bb377b5f\" (UID: \"70f04c02-05f6-45dc-9619-1349bb377b5f\") " Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.025468 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-utilities" (OuterVolumeSpecName: "utilities") pod "70f04c02-05f6-45dc-9619-1349bb377b5f" (UID: "70f04c02-05f6-45dc-9619-1349bb377b5f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.030274 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70f04c02-05f6-45dc-9619-1349bb377b5f-kube-api-access-n7rtx" (OuterVolumeSpecName: "kube-api-access-n7rtx") pod "70f04c02-05f6-45dc-9619-1349bb377b5f" (UID: "70f04c02-05f6-45dc-9619-1349bb377b5f"). InnerVolumeSpecName "kube-api-access-n7rtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.125235 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.125271 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7rtx\" (UniqueName: \"kubernetes.io/projected/70f04c02-05f6-45dc-9619-1349bb377b5f-kube-api-access-n7rtx\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.137749 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70f04c02-05f6-45dc-9619-1349bb377b5f" (UID: "70f04c02-05f6-45dc-9619-1349bb377b5f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.226753 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70f04c02-05f6-45dc-9619-1349bb377b5f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.760630 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9b8v5" event={"ID":"70f04c02-05f6-45dc-9619-1349bb377b5f","Type":"ContainerDied","Data":"7f55192abd0b5aa8a08cf7cf543936986a4b604dcf434f5efbbb71d4263393d8"} Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.760714 4884 scope.go:117] "RemoveContainer" containerID="d965970c4f2577c527b1120964fc9ecda01398218e54b252ec8e0bc344a76a0c" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.760727 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9b8v5" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.782463 4884 scope.go:117] "RemoveContainer" containerID="acaa5aba170ae0509660149bccc98a9888a56df7de327bbaaf3ff717c5672bda" Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.809042 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9b8v5"] Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.820196 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9b8v5"] Nov 28 16:40:05 crc kubenswrapper[4884]: I1128 16:40:05.832109 4884 scope.go:117] "RemoveContainer" containerID="c2fbbe3b907bb57d204801837874d00f134517f467600d70c832b5c576be0f86" Nov 28 16:40:06 crc kubenswrapper[4884]: I1128 16:40:06.697625 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" path="/var/lib/kubelet/pods/70f04c02-05f6-45dc-9619-1349bb377b5f/volumes" Nov 28 16:40:21 crc kubenswrapper[4884]: I1128 16:40:21.243729 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:40:21 crc kubenswrapper[4884]: I1128 16:40:21.244635 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:40:51 crc kubenswrapper[4884]: I1128 16:40:51.243524 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:40:51 crc kubenswrapper[4884]: I1128 16:40:51.244120 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.242646 4884 patch_prober.go:28] 
interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.243221 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.243269 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.243873 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1d8ad494f9b340b7f3c7e5e1e5e1e5813f7995968d1a726033a4d884c453dacf"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.243958 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://1d8ad494f9b340b7f3c7e5e1e5e1e5813f7995968d1a726033a4d884c453dacf" gracePeriod=600 Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.650130 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="1d8ad494f9b340b7f3c7e5e1e5e1e5813f7995968d1a726033a4d884c453dacf" exitCode=0 Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.650278 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"1d8ad494f9b340b7f3c7e5e1e5e1e5813f7995968d1a726033a4d884c453dacf"} Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.650389 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"} Nov 28 16:41:21 crc kubenswrapper[4884]: I1128 16:41:21.650407 4884 scope.go:117] "RemoveContainer" containerID="ddce061de6726722b76298812fcb848d32f632ba8693ba5f1d89ba7012d08549" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.382164 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-vrgzj"] Nov 28 16:41:25 crc kubenswrapper[4884]: E1128 16:41:25.382848 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="registry-server" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.382861 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="registry-server" Nov 28 16:41:25 crc kubenswrapper[4884]: E1128 16:41:25.382893 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" 
containerName="extract-utilities" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.382900 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="extract-utilities" Nov 28 16:41:25 crc kubenswrapper[4884]: E1128 16:41:25.382914 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="extract-content" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.382922 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="extract-content" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.383059 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="70f04c02-05f6-45dc-9619-1349bb377b5f" containerName="registry-server" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.383738 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.386542 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-922rj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.386709 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.386870 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.391941 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.394426 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.397961 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-vrgzj"] Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.478045 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfkt8\" (UniqueName: \"kubernetes.io/projected/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-kube-api-access-qfkt8\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.478137 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.478178 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-config\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.579055 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-config\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " 
pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.579476 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfkt8\" (UniqueName: \"kubernetes.io/projected/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-kube-api-access-qfkt8\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.579667 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.582603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.582603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-config\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.626218 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfkt8\" (UniqueName: \"kubernetes.io/projected/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-kube-api-access-qfkt8\") pod \"dnsmasq-dns-5d7b5456f5-vrgzj\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.700063 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.706322 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-hbgxz"] Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.707772 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.765453 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-hbgxz"] Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.884139 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-config\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.884194 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.884240 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhgb2\" (UniqueName: \"kubernetes.io/projected/9c99def4-9ef7-4685-9258-1cc1256967c2-kube-api-access-hhgb2\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.985077 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhgb2\" (UniqueName: \"kubernetes.io/projected/9c99def4-9ef7-4685-9258-1cc1256967c2-kube-api-access-hhgb2\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.985165 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-config\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.985190 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.985850 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:25 crc kubenswrapper[4884]: I1128 16:41:25.986642 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-config\") pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.006809 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhgb2\" (UniqueName: \"kubernetes.io/projected/9c99def4-9ef7-4685-9258-1cc1256967c2-kube-api-access-hhgb2\") 
pod \"dnsmasq-dns-98ddfc8f-hbgxz\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.058697 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.350252 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-vrgzj"] Nov 28 16:41:26 crc kubenswrapper[4884]: W1128 16:41:26.357168 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeecbf919_102c_4f0b_bd7b_b58a9b70c2bd.slice/crio-0a6d10033c11721e080c52f04164723627b9d09649c555f12d015c927057d81b WatchSource:0}: Error finding container 0a6d10033c11721e080c52f04164723627b9d09649c555f12d015c927057d81b: Status 404 returned error can't find the container with id 0a6d10033c11721e080c52f04164723627b9d09649c555f12d015c927057d81b Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.507147 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-hbgxz"] Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.558653 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.560193 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.565696 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.568023 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.568398 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.568658 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.568932 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-l5xvh" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.576377 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.698401 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" event={"ID":"9c99def4-9ef7-4685-9258-1cc1256967c2","Type":"ContainerStarted","Data":"1a7c2f9599341fc9dd6570b82b5922df1b9ed132b82ad961c1dfd4770795591c"} Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.698897 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" event={"ID":"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd","Type":"ContainerStarted","Data":"0a6d10033c11721e080c52f04164723627b9d09649c555f12d015c927057d81b"} Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.705830 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc 
kubenswrapper[4884]: I1128 16:41:26.706122 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706167 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa56250e-f38b-4f64-ad71-325c78aafda3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706200 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706264 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706287 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h6vd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-kube-api-access-4h6vd\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706364 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.706387 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa56250e-f38b-4f64-ad71-325c78aafda3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.809944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 
16:41:26.809994 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810024 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa56250e-f38b-4f64-ad71-325c78aafda3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810085 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h6vd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-kube-api-access-4h6vd\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810142 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810170 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810187 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa56250e-f38b-4f64-ad71-325c78aafda3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810329 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.810538 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.811325 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.812551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-server-conf\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.814563 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.814673 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/581d5aade5c9467420aad429baaa5fa1bb1cbf44a236a1cafcd4427634e1135a/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.814785 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa56250e-f38b-4f64-ad71-325c78aafda3-pod-info\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.816596 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa56250e-f38b-4f64-ad71-325c78aafda3-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.817739 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.832142 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h6vd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-kube-api-access-4h6vd\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.850267 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc 
kubenswrapper[4884]: I1128 16:41:26.886523 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.887774 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.889601 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.890335 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.890801 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.890980 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-dqprr" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.893890 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.896419 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:41:26 crc kubenswrapper[4884]: I1128 16:41:26.903114 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.014954 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/81d9d69a-df2c-45b6-aced-2a41a9f91c67-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015193 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb4dl\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-kube-api-access-rb4dl\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015287 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015374 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015420 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015523 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/81d9d69a-df2c-45b6-aced-2a41a9f91c67-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015722 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.015760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117108 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117151 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117176 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117195 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/81d9d69a-df2c-45b6-aced-2a41a9f91c67-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117211 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb4dl\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-kube-api-access-rb4dl\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 
16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117266 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117292 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.117325 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/81d9d69a-df2c-45b6-aced-2a41a9f91c67-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.118567 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.120139 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.120439 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.120730 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.121083 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.121133 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1117efca265b3507eb58d954574e4372ae58838f2807435990a8e560db25e330/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.123060 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/81d9d69a-df2c-45b6-aced-2a41a9f91c67-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.124364 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.131698 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/81d9d69a-df2c-45b6-aced-2a41a9f91c67-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.135843 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb4dl\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-kube-api-access-rb4dl\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.154233 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.265714 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.348322 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.711310 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.716471 4884 generic.go:334] "Generic (PLEG): container finished" podID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerID="1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e" exitCode=0 Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.716593 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" event={"ID":"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd","Type":"ContainerDied","Data":"1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e"} Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.717926 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fa56250e-f38b-4f64-ad71-325c78aafda3","Type":"ContainerStarted","Data":"4ab68a337654d03ad4e0841c8542e9e0f05cd55a9920da6112d9927f8424d43f"} Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.722766 4884 generic.go:334] "Generic (PLEG): container finished" podID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerID="49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655" exitCode=0 Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.722810 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" event={"ID":"9c99def4-9ef7-4685-9258-1cc1256967c2","Type":"ContainerDied","Data":"49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655"} Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.877195 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.879312 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.884695 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9jq56" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.884704 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.885001 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.885251 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.885734 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.893228 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 28 16:41:27 crc kubenswrapper[4884]: E1128 16:41:27.900554 4884 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 28 16:41:27 crc kubenswrapper[4884]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 28 16:41:27 crc kubenswrapper[4884]: > podSandboxID="0a6d10033c11721e080c52f04164723627b9d09649c555f12d015c927057d81b" Nov 28 16:41:27 crc kubenswrapper[4884]: E1128 16:41:27.900853 4884 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 16:41:27 crc kubenswrapper[4884]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n8chc6h5bh56fh546hb7hc8h67h5bchffh577h697h5b5h5bdh59bhf6hf4h558hb5h578h595h5cchfbh644h59ch7fh654h547h587h5cbh5d5h8fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qfkt8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5d7b5456f5-vrgzj_openstack(eecbf919-102c-4f0b-bd7b-b58a9b70c2bd): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 28 16:41:27 crc kubenswrapper[4884]: > logger="UnhandledError" Nov 28 16:41:27 crc kubenswrapper[4884]: I1128 16:41:27.900891 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:41:27 crc kubenswrapper[4884]: E1128 16:41:27.904387 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-config-data-default\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032481 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032524 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqtgd\" (UniqueName: \"kubernetes.io/projected/e64ccfeb-af75-4fa7-adb5-919c41a2f261-kube-api-access-mqtgd\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032583 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e64ccfeb-af75-4fa7-adb5-919c41a2f261-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032654 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032702 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032763 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-secrets\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.032863 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-kolla-config\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.133977 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134041 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134100 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134129 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-secrets\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-kolla-config\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134200 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-config-data-default\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134236 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqtgd\" (UniqueName: \"kubernetes.io/projected/e64ccfeb-af75-4fa7-adb5-919c41a2f261-kube-api-access-mqtgd\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.134298 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e64ccfeb-af75-4fa7-adb5-919c41a2f261-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.135680 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-config-data-default\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.135935 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.136010 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e64ccfeb-af75-4fa7-adb5-919c41a2f261-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.136402 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e64ccfeb-af75-4fa7-adb5-919c41a2f261-kolla-config\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.138580 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-secrets\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc 
kubenswrapper[4884]: I1128 16:41:28.138660 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.138973 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e64ccfeb-af75-4fa7-adb5-919c41a2f261-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.139335 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.139368 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9e52e25598703ed21a2af3699f3f7f80bbdb3cec0350ed7b90e5c5f1a570b33c/globalmount\"" pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.267192 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.269568 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqtgd\" (UniqueName: \"kubernetes.io/projected/e64ccfeb-af75-4fa7-adb5-919c41a2f261-kube-api-access-mqtgd\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.273049 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.276767 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.276873 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-dbnt8" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.288923 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.438642 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxpl5\" (UniqueName: \"kubernetes.io/projected/b6285863-9c40-4877-891e-acc716bb80d4-kube-api-access-kxpl5\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.438696 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6285863-9c40-4877-891e-acc716bb80d4-config-data\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.438825 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6285863-9c40-4877-891e-acc716bb80d4-kolla-config\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.455506 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f8d3f6f9-5ef8-4ee0-9652-1d1f6bf3cdd8\") pod \"openstack-galera-0\" (UID: \"e64ccfeb-af75-4fa7-adb5-919c41a2f261\") " pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.539930 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxpl5\" (UniqueName: \"kubernetes.io/projected/b6285863-9c40-4877-891e-acc716bb80d4-kube-api-access-kxpl5\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.539980 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6285863-9c40-4877-891e-acc716bb80d4-config-data\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.540086 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6285863-9c40-4877-891e-acc716bb80d4-kolla-config\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.540897 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b6285863-9c40-4877-891e-acc716bb80d4-kolla-config\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.541140 
4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b6285863-9c40-4877-891e-acc716bb80d4-config-data\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.551875 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.559555 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxpl5\" (UniqueName: \"kubernetes.io/projected/b6285863-9c40-4877-891e-acc716bb80d4-kube-api-access-kxpl5\") pod \"memcached-0\" (UID: \"b6285863-9c40-4877-891e-acc716bb80d4\") " pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.603445 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.730229 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fa56250e-f38b-4f64-ad71-325c78aafda3","Type":"ContainerStarted","Data":"ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853"} Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.732367 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" event={"ID":"9c99def4-9ef7-4685-9258-1cc1256967c2","Type":"ContainerStarted","Data":"dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139"} Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.732511 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.733198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"81d9d69a-df2c-45b6-aced-2a41a9f91c67","Type":"ContainerStarted","Data":"5f095134e19aa4f6382e9f5a8e3b7ba3ba643c9941a12e8d1016c68a1fe5343d"} Nov 28 16:41:28 crc kubenswrapper[4884]: I1128 16:41:28.794540 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" podStartSLOduration=3.794520349 podStartE2EDuration="3.794520349s" podCreationTimestamp="2025-11-28 16:41:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:41:28.792103321 +0000 UTC m=+4928.354887122" watchObservedRunningTime="2025-11-28 16:41:28.794520349 +0000 UTC m=+4928.357304150" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.080624 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.166098 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 16:41:29 crc kubenswrapper[4884]: W1128 16:41:29.172965 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6285863_9c40_4877_891e_acc716bb80d4.slice/crio-55dca4570bbfb0e4c9fee3d313357ccccf647169da2fe129747f5e043d356e3e WatchSource:0}: Error finding container 55dca4570bbfb0e4c9fee3d313357ccccf647169da2fe129747f5e043d356e3e: Status 404 returned error can't find the container with id 55dca4570bbfb0e4c9fee3d313357ccccf647169da2fe129747f5e043d356e3e Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.402448 
4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.403700 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.407800 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.407878 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.409129 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-6zw5l" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.409538 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.416055 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.562879 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndwdh\" (UniqueName: \"kubernetes.io/projected/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-kube-api-access-ndwdh\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.562949 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.562971 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.563002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.563039 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.563060 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " 
pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.563106 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.563127 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.563157 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664043 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664116 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664162 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664206 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndwdh\" (UniqueName: \"kubernetes.io/projected/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-kube-api-access-ndwdh\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664255 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: 
\"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664352 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.664411 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.665310 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.665928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.666232 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.666485 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.667936 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.667971 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0c7fa38e90f01996604fb8082d788afd3444256ba7191e7e92a470b64309f847/globalmount\"" pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.670399 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.670569 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.671794 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.682891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndwdh\" (UniqueName: \"kubernetes.io/projected/c3fd4b88-fe6f-4f27-8e1c-26360e576cf6-kube-api-access-ndwdh\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.696306 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5f6f4fe5-a251-45f4-be30-a715b3e4c269\") pod \"openstack-cell1-galera-0\" (UID: \"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6\") " pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.722597 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.743479 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e64ccfeb-af75-4fa7-adb5-919c41a2f261","Type":"ContainerStarted","Data":"db32bf068322b03f2ae72d77c5580e9656e87f90344b6f3d21e891492b6eed17"} Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.743550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e64ccfeb-af75-4fa7-adb5-919c41a2f261","Type":"ContainerStarted","Data":"bb2c5a19dc4ed9e9a5a584e955394c2e10e64accc82cc39af70e2d22d14d3dd2"} Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.749761 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" event={"ID":"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd","Type":"ContainerStarted","Data":"e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f"} Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.750060 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.751980 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"81d9d69a-df2c-45b6-aced-2a41a9f91c67","Type":"ContainerStarted","Data":"2506eaf0669bbd0b8dcb617ce07cee5fcc80c0ee2b03b778da550dfdcdee3342"} Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.753420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"b6285863-9c40-4877-891e-acc716bb80d4","Type":"ContainerStarted","Data":"1ba7186b5586abc8dcb09f6ec73b2fdbfb822fe05f0004fc57d7180ecb95468f"} Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.753450 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"b6285863-9c40-4877-891e-acc716bb80d4","Type":"ContainerStarted","Data":"55dca4570bbfb0e4c9fee3d313357ccccf647169da2fe129747f5e043d356e3e"} Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.753717 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.801612 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.801585618 podStartE2EDuration="1.801585618s" podCreationTimestamp="2025-11-28 16:41:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:41:29.800587043 +0000 UTC m=+4929.363370844" watchObservedRunningTime="2025-11-28 16:41:29.801585618 +0000 UTC m=+4929.364369429" Nov 28 16:41:29 crc kubenswrapper[4884]: I1128 16:41:29.833813 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" podStartSLOduration=4.833782237 podStartE2EDuration="4.833782237s" podCreationTimestamp="2025-11-28 16:41:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:41:29.820140042 +0000 UTC m=+4929.382923863" watchObservedRunningTime="2025-11-28 16:41:29.833782237 +0000 UTC m=+4929.396566038" Nov 28 16:41:30 crc kubenswrapper[4884]: I1128 16:41:30.283478 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 16:41:30 crc 
kubenswrapper[4884]: I1128 16:41:30.764506 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6","Type":"ContainerStarted","Data":"e753ed4374864d8f44a7b4170137f722c1a0fb5a3071530585e2f0195eaffd73"} Nov 28 16:41:30 crc kubenswrapper[4884]: I1128 16:41:30.764874 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6","Type":"ContainerStarted","Data":"166f5783a7dcba35dd2443afd6d618a758d01a0ce83692a4b62e2b667f6d3ffe"} Nov 28 16:41:33 crc kubenswrapper[4884]: I1128 16:41:33.787197 4884 generic.go:334] "Generic (PLEG): container finished" podID="e64ccfeb-af75-4fa7-adb5-919c41a2f261" containerID="db32bf068322b03f2ae72d77c5580e9656e87f90344b6f3d21e891492b6eed17" exitCode=0 Nov 28 16:41:33 crc kubenswrapper[4884]: I1128 16:41:33.787611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e64ccfeb-af75-4fa7-adb5-919c41a2f261","Type":"ContainerDied","Data":"db32bf068322b03f2ae72d77c5580e9656e87f90344b6f3d21e891492b6eed17"} Nov 28 16:41:34 crc kubenswrapper[4884]: I1128 16:41:34.802069 4884 generic.go:334] "Generic (PLEG): container finished" podID="c3fd4b88-fe6f-4f27-8e1c-26360e576cf6" containerID="e753ed4374864d8f44a7b4170137f722c1a0fb5a3071530585e2f0195eaffd73" exitCode=0 Nov 28 16:41:34 crc kubenswrapper[4884]: I1128 16:41:34.802169 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6","Type":"ContainerDied","Data":"e753ed4374864d8f44a7b4170137f722c1a0fb5a3071530585e2f0195eaffd73"} Nov 28 16:41:34 crc kubenswrapper[4884]: I1128 16:41:34.808893 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e64ccfeb-af75-4fa7-adb5-919c41a2f261","Type":"ContainerStarted","Data":"292389d0915ff6be1ac0fd7767ef55ee395ba271b436a2836de7784261fe57df"} Nov 28 16:41:34 crc kubenswrapper[4884]: I1128 16:41:34.878229 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.878197198 podStartE2EDuration="8.878197198s" podCreationTimestamp="2025-11-28 16:41:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:41:34.864435481 +0000 UTC m=+4934.427219372" watchObservedRunningTime="2025-11-28 16:41:34.878197198 +0000 UTC m=+4934.440981039" Nov 28 16:41:35 crc kubenswrapper[4884]: I1128 16:41:35.703426 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:35 crc kubenswrapper[4884]: I1128 16:41:35.825443 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"c3fd4b88-fe6f-4f27-8e1c-26360e576cf6","Type":"ContainerStarted","Data":"a5dca20e1bee518cb3a7ca5103641eb3a57f537c0eb1671ee49944e5dbffe66b"} Nov 28 16:41:35 crc kubenswrapper[4884]: I1128 16:41:35.853591 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.853567849 podStartE2EDuration="7.853567849s" podCreationTimestamp="2025-11-28 16:41:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:41:35.845889051 +0000 UTC 
m=+4935.408672852" watchObservedRunningTime="2025-11-28 16:41:35.853567849 +0000 UTC m=+4935.416351660" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.060756 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.104951 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-vrgzj"] Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.105197 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerName="dnsmasq-dns" containerID="cri-o://e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f" gracePeriod=10 Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.627686 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.802601 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfkt8\" (UniqueName: \"kubernetes.io/projected/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-kube-api-access-qfkt8\") pod \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.802802 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-config\") pod \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.802856 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-dns-svc\") pod \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\" (UID: \"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd\") " Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.808464 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-kube-api-access-qfkt8" (OuterVolumeSpecName: "kube-api-access-qfkt8") pod "eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" (UID: "eecbf919-102c-4f0b-bd7b-b58a9b70c2bd"). InnerVolumeSpecName "kube-api-access-qfkt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.836429 4884 generic.go:334] "Generic (PLEG): container finished" podID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerID="e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f" exitCode=0 Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.836480 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" event={"ID":"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd","Type":"ContainerDied","Data":"e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f"} Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.836519 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" event={"ID":"eecbf919-102c-4f0b-bd7b-b58a9b70c2bd","Type":"ContainerDied","Data":"0a6d10033c11721e080c52f04164723627b9d09649c555f12d015c927057d81b"} Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.836522 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-vrgzj" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.836542 4884 scope.go:117] "RemoveContainer" containerID="e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.844188 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-config" (OuterVolumeSpecName: "config") pod "eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" (UID: "eecbf919-102c-4f0b-bd7b-b58a9b70c2bd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.850984 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" (UID: "eecbf919-102c-4f0b-bd7b-b58a9b70c2bd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.891554 4884 scope.go:117] "RemoveContainer" containerID="1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.904528 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.904573 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.904587 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfkt8\" (UniqueName: \"kubernetes.io/projected/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd-kube-api-access-qfkt8\") on node \"crc\" DevicePath \"\"" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.918849 4884 scope.go:117] "RemoveContainer" containerID="e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f" Nov 28 16:41:36 crc kubenswrapper[4884]: E1128 16:41:36.919732 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f\": container with ID starting with e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f not found: ID does not exist" containerID="e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.919789 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f"} err="failed to get container status \"e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f\": rpc error: code = NotFound desc = could not find container \"e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f\": container with ID starting with e5a5a1cb2ef29eb41feea4668a3f1eee238eb76ed59ac8f63e25f039bf37483f not found: ID does not exist" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.919834 4884 scope.go:117] "RemoveContainer" containerID="1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e" Nov 28 16:41:36 crc kubenswrapper[4884]: E1128 16:41:36.920536 4884 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e\": container with ID starting with 1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e not found: ID does not exist" containerID="1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e" Nov 28 16:41:36 crc kubenswrapper[4884]: I1128 16:41:36.920607 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e"} err="failed to get container status \"1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e\": rpc error: code = NotFound desc = could not find container \"1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e\": container with ID starting with 1494bc6f99ca47e5745e530883391869aa557bc07122721f0e884c0f72b7f45e not found: ID does not exist" Nov 28 16:41:37 crc kubenswrapper[4884]: I1128 16:41:37.190987 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-vrgzj"] Nov 28 16:41:37 crc kubenswrapper[4884]: I1128 16:41:37.201448 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-vrgzj"] Nov 28 16:41:38 crc kubenswrapper[4884]: I1128 16:41:38.552064 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 16:41:38 crc kubenswrapper[4884]: I1128 16:41:38.552365 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 16:41:38 crc kubenswrapper[4884]: I1128 16:41:38.600139 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 16:41:38 crc kubenswrapper[4884]: I1128 16:41:38.604080 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 16:41:38 crc kubenswrapper[4884]: I1128 16:41:38.696707 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" path="/var/lib/kubelet/pods/eecbf919-102c-4f0b-bd7b-b58a9b70c2bd/volumes" Nov 28 16:41:38 crc kubenswrapper[4884]: I1128 16:41:38.910619 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 16:41:39 crc kubenswrapper[4884]: I1128 16:41:39.723381 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:39 crc kubenswrapper[4884]: I1128 16:41:39.723740 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:39 crc kubenswrapper[4884]: I1128 16:41:39.787780 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 16:41:39 crc kubenswrapper[4884]: I1128 16:41:39.901823 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 16:42:01 crc kubenswrapper[4884]: I1128 16:42:01.047714 4884 generic.go:334] "Generic (PLEG): container finished" podID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerID="2506eaf0669bbd0b8dcb617ce07cee5fcc80c0ee2b03b778da550dfdcdee3342" exitCode=0 Nov 28 16:42:01 crc kubenswrapper[4884]: I1128 16:42:01.047835 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"81d9d69a-df2c-45b6-aced-2a41a9f91c67","Type":"ContainerDied","Data":"2506eaf0669bbd0b8dcb617ce07cee5fcc80c0ee2b03b778da550dfdcdee3342"} Nov 28 16:42:01 crc kubenswrapper[4884]: I1128 16:42:01.052540 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerID="ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853" exitCode=0 Nov 28 16:42:01 crc kubenswrapper[4884]: I1128 16:42:01.052586 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fa56250e-f38b-4f64-ad71-325c78aafda3","Type":"ContainerDied","Data":"ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853"} Nov 28 16:42:02 crc kubenswrapper[4884]: I1128 16:42:02.064969 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"81d9d69a-df2c-45b6-aced-2a41a9f91c67","Type":"ContainerStarted","Data":"61f6671bf2d7be466148866f3c059c505258ef8b62e4477d08466beccd7193fb"} Nov 28 16:42:02 crc kubenswrapper[4884]: I1128 16:42:02.065835 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:02 crc kubenswrapper[4884]: I1128 16:42:02.068759 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fa56250e-f38b-4f64-ad71-325c78aafda3","Type":"ContainerStarted","Data":"5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa"} Nov 28 16:42:02 crc kubenswrapper[4884]: I1128 16:42:02.068955 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 16:42:02 crc kubenswrapper[4884]: I1128 16:42:02.093996 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.093976265 podStartE2EDuration="37.093976265s" podCreationTimestamp="2025-11-28 16:41:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:42:02.091470724 +0000 UTC m=+4961.654254535" watchObservedRunningTime="2025-11-28 16:42:02.093976265 +0000 UTC m=+4961.656760076" Nov 28 16:42:02 crc kubenswrapper[4884]: I1128 16:42:02.119878 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.11985673 podStartE2EDuration="37.11985673s" podCreationTimestamp="2025-11-28 16:41:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:42:02.114819466 +0000 UTC m=+4961.677603307" watchObservedRunningTime="2025-11-28 16:42:02.11985673 +0000 UTC m=+4961.682640541" Nov 28 16:42:16 crc kubenswrapper[4884]: I1128 16:42:16.899328 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 16:42:17 crc kubenswrapper[4884]: I1128 16:42:17.268789 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.030533 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-cbph4"] Nov 28 16:42:24 crc kubenswrapper[4884]: E1128 16:42:24.031549 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerName="dnsmasq-dns" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 
16:42:24.031573 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerName="dnsmasq-dns" Nov 28 16:42:24 crc kubenswrapper[4884]: E1128 16:42:24.031602 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerName="init" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.031635 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerName="init" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.031904 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="eecbf919-102c-4f0b-bd7b-b58a9b70c2bd" containerName="dnsmasq-dns" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.032996 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.053060 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-cbph4"] Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.159019 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.159162 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6w22\" (UniqueName: \"kubernetes.io/projected/4d5471a8-72a3-4a6c-a70d-12daf2c94973-kube-api-access-s6w22\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.159206 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-config\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.260698 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.261020 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6w22\" (UniqueName: \"kubernetes.io/projected/4d5471a8-72a3-4a6c-a70d-12daf2c94973-kube-api-access-s6w22\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.261163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-config\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.261912 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-config\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.261966 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.283180 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6w22\" (UniqueName: \"kubernetes.io/projected/4d5471a8-72a3-4a6c-a70d-12daf2c94973-kube-api-access-s6w22\") pod \"dnsmasq-dns-5b7946d7b9-cbph4\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") " pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.350220 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.659311 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:42:24 crc kubenswrapper[4884]: I1128 16:42:24.791974 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-cbph4"] Nov 28 16:42:25 crc kubenswrapper[4884]: I1128 16:42:25.182119 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:42:25 crc kubenswrapper[4884]: I1128 16:42:25.314605 4884 generic.go:334] "Generic (PLEG): container finished" podID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerID="a21b8a5949b0143932059b176c951f3338bb15184632f84e54d23e026b52482c" exitCode=0 Nov 28 16:42:25 crc kubenswrapper[4884]: I1128 16:42:25.314665 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" event={"ID":"4d5471a8-72a3-4a6c-a70d-12daf2c94973","Type":"ContainerDied","Data":"a21b8a5949b0143932059b176c951f3338bb15184632f84e54d23e026b52482c"} Nov 28 16:42:25 crc kubenswrapper[4884]: I1128 16:42:25.314698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" event={"ID":"4d5471a8-72a3-4a6c-a70d-12daf2c94973","Type":"ContainerStarted","Data":"13bec95d5200dfb8e660cc92671f6fbb63107626f8c5f7d02b0db44007ba206b"} Nov 28 16:42:26 crc kubenswrapper[4884]: I1128 16:42:26.324157 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" event={"ID":"4d5471a8-72a3-4a6c-a70d-12daf2c94973","Type":"ContainerStarted","Data":"40376a42be5cc3a62b91ddf5068ec0b08458201dea3305d383598b436418a960"} Nov 28 16:42:26 crc kubenswrapper[4884]: I1128 16:42:26.324824 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:26 crc kubenswrapper[4884]: I1128 16:42:26.348922 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" podStartSLOduration=2.348903537 podStartE2EDuration="2.348903537s" podCreationTimestamp="2025-11-28 16:42:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:42:26.34540607 +0000 UTC m=+4985.908189881" watchObservedRunningTime="2025-11-28 
16:42:26.348903537 +0000 UTC m=+4985.911687348" Nov 28 16:42:26 crc kubenswrapper[4884]: I1128 16:42:26.380416 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="rabbitmq" containerID="cri-o://5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa" gracePeriod=604799 Nov 28 16:42:26 crc kubenswrapper[4884]: I1128 16:42:26.897570 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.238:5672: connect: connection refused" Nov 28 16:42:26 crc kubenswrapper[4884]: I1128 16:42:26.915180 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="rabbitmq" containerID="cri-o://61f6671bf2d7be466148866f3c059c505258ef8b62e4477d08466beccd7193fb" gracePeriod=604799 Nov 28 16:42:27 crc kubenswrapper[4884]: I1128 16:42:27.267379 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.239:5672: connect: connection refused" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.183613 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303465 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4h6vd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-kube-api-access-4h6vd\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303524 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-erlang-cookie\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303553 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-server-conf\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303584 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa56250e-f38b-4f64-ad71-325c78aafda3-pod-info\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303649 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-plugins\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303670 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-confd\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303692 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa56250e-f38b-4f64-ad71-325c78aafda3-erlang-cookie-secret\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303801 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.303893 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-plugins-conf\") pod \"fa56250e-f38b-4f64-ad71-325c78aafda3\" (UID: \"fa56250e-f38b-4f64-ad71-325c78aafda3\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.304042 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.304218 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.304241 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.304683 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.308331 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa56250e-f38b-4f64-ad71-325c78aafda3-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.309715 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/fa56250e-f38b-4f64-ad71-325c78aafda3-pod-info" (OuterVolumeSpecName: "pod-info") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.314447 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-kube-api-access-4h6vd" (OuterVolumeSpecName: "kube-api-access-4h6vd") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "kube-api-access-4h6vd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.320214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2" (OuterVolumeSpecName: "persistence") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "pvc-f87b579c-cc49-4777-96f3-baed14398ab2". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.378522 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-server-conf" (OuterVolumeSpecName: "server-conf") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.392307 4884 generic.go:334] "Generic (PLEG): container finished" podID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerID="61f6671bf2d7be466148866f3c059c505258ef8b62e4477d08466beccd7193fb" exitCode=0 Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.392370 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"81d9d69a-df2c-45b6-aced-2a41a9f91c67","Type":"ContainerDied","Data":"61f6671bf2d7be466148866f3c059c505258ef8b62e4477d08466beccd7193fb"} Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.399423 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerID="5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa" exitCode=0 Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.399469 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fa56250e-f38b-4f64-ad71-325c78aafda3","Type":"ContainerDied","Data":"5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa"} Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.399498 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"fa56250e-f38b-4f64-ad71-325c78aafda3","Type":"ContainerDied","Data":"4ab68a337654d03ad4e0841c8542e9e0f05cd55a9920da6112d9927f8424d43f"} Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.399519 4884 scope.go:117] "RemoveContainer" containerID="5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.399757 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.406799 4884 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/fa56250e-f38b-4f64-ad71-325c78aafda3-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.406899 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") on node \"crc\" " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.406914 4884 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.406925 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4h6vd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-kube-api-access-4h6vd\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.406935 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.407209 4884 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/fa56250e-f38b-4f64-ad71-325c78aafda3-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc 
kubenswrapper[4884]: I1128 16:42:33.407220 4884 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/fa56250e-f38b-4f64-ad71-325c78aafda3-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.429514 4884 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.429676 4884 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f87b579c-cc49-4777-96f3-baed14398ab2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2") on node "crc" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.442745 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "fa56250e-f38b-4f64-ad71-325c78aafda3" (UID: "fa56250e-f38b-4f64-ad71-325c78aafda3"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.459050 4884 scope.go:117] "RemoveContainer" containerID="ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.474371 4884 scope.go:117] "RemoveContainer" containerID="5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa" Nov 28 16:42:33 crc kubenswrapper[4884]: E1128 16:42:33.474815 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa\": container with ID starting with 5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa not found: ID does not exist" containerID="5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.474856 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa"} err="failed to get container status \"5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa\": rpc error: code = NotFound desc = could not find container \"5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa\": container with ID starting with 5d6789636b25078d8f76d3cfc4756a3286c0cb11179d81f0a19c042a1cd9eeaa not found: ID does not exist" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.474889 4884 scope.go:117] "RemoveContainer" containerID="ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853" Nov 28 16:42:33 crc kubenswrapper[4884]: E1128 16:42:33.475365 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853\": container with ID starting with ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853 not found: ID does not exist" containerID="ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.475380 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853"} err="failed to get container status 
\"ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853\": rpc error: code = NotFound desc = could not find container \"ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853\": container with ID starting with ed6b973caf0575cb07e38675a467fcb02b185e26cdd41bb22fee189f72605853 not found: ID does not exist" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.484931 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.513524 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/fa56250e-f38b-4f64-ad71-325c78aafda3-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.513552 4884 reconciler_common.go:293] "Volume detached for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614272 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/81d9d69a-df2c-45b6-aced-2a41a9f91c67-erlang-cookie-secret\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614331 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-confd\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614377 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-plugins\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614408 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-erlang-cookie\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614452 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb4dl\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-kube-api-access-rb4dl\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614477 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-plugins-conf\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.614509 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/81d9d69a-df2c-45b6-aced-2a41a9f91c67-pod-info\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: 
\"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.615054 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.615134 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-server-conf\") pod \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\" (UID: \"81d9d69a-df2c-45b6-aced-2a41a9f91c67\") " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.615437 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.615465 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.615554 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.617841 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/81d9d69a-df2c-45b6-aced-2a41a9f91c67-pod-info" (OuterVolumeSpecName: "pod-info") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.617873 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-kube-api-access-rb4dl" (OuterVolumeSpecName: "kube-api-access-rb4dl") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "kube-api-access-rb4dl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.618003 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81d9d69a-df2c-45b6-aced-2a41a9f91c67-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.624366 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3" (OuterVolumeSpecName: "persistence") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "pvc-959aea4b-829b-4af6-be73-7622f87f33b3". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.633975 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-server-conf" (OuterVolumeSpecName: "server-conf") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.693128 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "81d9d69a-df2c-45b6-aced-2a41a9f91c67" (UID: "81d9d69a-df2c-45b6-aced-2a41a9f91c67"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.716441 4884 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/81d9d69a-df2c-45b6-aced-2a41a9f91c67-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.716645 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") on node \"crc\" " Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.716747 4884 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.716849 4884 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/81d9d69a-df2c-45b6-aced-2a41a9f91c67-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.716942 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.717025 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.717180 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/81d9d69a-df2c-45b6-aced-2a41a9f91c67-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.717299 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb4dl\" (UniqueName: 
\"kubernetes.io/projected/81d9d69a-df2c-45b6-aced-2a41a9f91c67-kube-api-access-rb4dl\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.717391 4884 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/81d9d69a-df2c-45b6-aced-2a41a9f91c67-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.737066 4884 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.737231 4884 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-959aea4b-829b-4af6-be73-7622f87f33b3" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3") on node "crc" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.740511 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.754189 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.764919 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:42:33 crc kubenswrapper[4884]: E1128 16:42:33.766662 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="setup-container" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.766734 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="setup-container" Nov 28 16:42:33 crc kubenswrapper[4884]: E1128 16:42:33.766753 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="rabbitmq" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.766761 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="rabbitmq" Nov 28 16:42:33 crc kubenswrapper[4884]: E1128 16:42:33.766836 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="setup-container" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.766845 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="setup-container" Nov 28 16:42:33 crc kubenswrapper[4884]: E1128 16:42:33.766889 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="rabbitmq" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.766923 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="rabbitmq" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.767872 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" containerName="rabbitmq" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.767915 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" containerName="rabbitmq" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.786137 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.786769 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.789564 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.791350 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.791481 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.791690 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-l5xvh" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.792062 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819364 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819615 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819686 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjjh4\" (UniqueName: \"kubernetes.io/projected/b9c35913-c5bf-4459-8252-a5ba99fb302b-kube-api-access-rjjh4\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819736 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819767 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819796 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9c35913-c5bf-4459-8252-a5ba99fb302b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819818 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/b9c35913-c5bf-4459-8252-a5ba99fb302b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.819970 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9c35913-c5bf-4459-8252-a5ba99fb302b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.820010 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9c35913-c5bf-4459-8252-a5ba99fb302b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.820117 4884 reconciler_common.go:293] "Volume detached for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921529 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921604 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921653 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9c35913-c5bf-4459-8252-a5ba99fb302b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921681 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9c35913-c5bf-4459-8252-a5ba99fb302b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921734 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9c35913-c5bf-4459-8252-a5ba99fb302b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921760 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9c35913-c5bf-4459-8252-a5ba99fb302b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921794 
4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921867 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.921900 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjjh4\" (UniqueName: \"kubernetes.io/projected/b9c35913-c5bf-4459-8252-a5ba99fb302b-kube-api-access-rjjh4\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.922767 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.924784 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.925553 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
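[Annotation: the entry above ("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...") reflects the two-phase CSI mount flow: a plugin that advertises STAGE_UNSTAGE_VOLUME is first staged once at a node-global path, then published into each pod directory; the hostpath provisioner here skips the staging phase. The following is a minimal, illustrative Go sketch of that capability check only — the toy plugin type and function names are hypothetical and are not the kubelet's actual csi_attacher.go code.]

    // Toy model of the two-phase CSI mount flow seen in the entries above:
    // MountDevice (global staging) is skipped when the plugin does not
    // advertise STAGE_UNSTAGE_VOLUME; MountVolume.SetUp (per-pod publish)
    // always runs. All names here are hypothetical.
    package main

    import "fmt"

    const capStageUnstage = "STAGE_UNSTAGE_VOLUME"

    type toyPlugin struct {
        name string
        caps map[string]bool
    }

    // mountDevice mirrors the attacher step: skipped entirely when the
    // plugin does not advertise STAGE_UNSTAGE_VOLUME, matching the
    // "Skipping MountDevice..." line logged above.
    func mountDevice(p toyPlugin, volumeID, globalPath string) {
        if !p.caps[capStageUnstage] {
            fmt.Printf("%s: MountDevice %s capability not set, skipping\n",
                p.name, capStageUnstage)
            return
        }
        fmt.Printf("%s: staged %s at %s\n", p.name, volumeID, globalPath)
    }

    // setUp mirrors MountVolume.SetUp: the per-pod publish always runs.
    func setUp(p toyPlugin, volumeID, podDir string) {
        fmt.Printf("%s: published %s into %s\n", p.name, volumeID, podDir)
    }

    func main() {
        p := toyPlugin{name: "kubevirt.io.hostpath-provisioner", caps: map[string]bool{}}
        vol := "pvc-f87b579c-cc49-4777-96f3-baed14398ab2"
        mountDevice(p, vol, "/var/lib/kubelet/plugins/.../globalmount")
        setUp(p, vol, "/var/lib/kubelet/pods/<uid>/volumes/...")
    }

[End annotation; log continues below.]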
Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.925604 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/581d5aade5c9467420aad429baaa5fa1bb1cbf44a236a1cafcd4427634e1135a/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.926065 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b9c35913-c5bf-4459-8252-a5ba99fb302b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.926140 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b9c35913-c5bf-4459-8252-a5ba99fb302b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.924653 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b9c35913-c5bf-4459-8252-a5ba99fb302b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.927276 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b9c35913-c5bf-4459-8252-a5ba99fb302b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.927488 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b9c35913-c5bf-4459-8252-a5ba99fb302b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.946534 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjjh4\" (UniqueName: \"kubernetes.io/projected/b9c35913-c5bf-4459-8252-a5ba99fb302b-kube-api-access-rjjh4\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:33 crc kubenswrapper[4884]: I1128 16:42:33.961541 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f87b579c-cc49-4777-96f3-baed14398ab2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f87b579c-cc49-4777-96f3-baed14398ab2\") pod \"rabbitmq-server-0\" (UID: \"b9c35913-c5bf-4459-8252-a5ba99fb302b\") " pod="openstack/rabbitmq-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.108388 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.352227 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.411679 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"81d9d69a-df2c-45b6-aced-2a41a9f91c67","Type":"ContainerDied","Data":"5f095134e19aa4f6382e9f5a8e3b7ba3ba643c9941a12e8d1016c68a1fe5343d"} Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.411736 4884 scope.go:117] "RemoveContainer" containerID="61f6671bf2d7be466148866f3c059c505258ef8b62e4477d08466beccd7193fb" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.411884 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.425941 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-hbgxz"] Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.426255 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerName="dnsmasq-dns" containerID="cri-o://dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139" gracePeriod=10 Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.452450 4884 scope.go:117] "RemoveContainer" containerID="2506eaf0669bbd0b8dcb617ce07cee5fcc80c0ee2b03b778da550dfdcdee3342" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.464607 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.470903 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.486475 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.487599 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.490665 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.490682 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-dqprr" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.490816 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.490956 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.492439 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.506229 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.531876 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/00782f1b-8777-480e-bb85-1a4cafb77cee-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.531915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/00782f1b-8777-480e-bb85-1a4cafb77cee-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.531952 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.531990 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.532024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.532040 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/00782f1b-8777-480e-bb85-1a4cafb77cee-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 
16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.532053 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.532152 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-948hq\" (UniqueName: \"kubernetes.io/projected/00782f1b-8777-480e-bb85-1a4cafb77cee-kube-api-access-948hq\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.532177 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/00782f1b-8777-480e-bb85-1a4cafb77cee-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.547106 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 16:42:34 crc kubenswrapper[4884]: W1128 16:42:34.547133 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9c35913_c5bf_4459_8252_a5ba99fb302b.slice/crio-70f572f3ebc31e0af2a765dc2cdc69d7d2132e96b62c1db0aa046c2bf06d25c2 WatchSource:0}: Error finding container 70f572f3ebc31e0af2a765dc2cdc69d7d2132e96b62c1db0aa046c2bf06d25c2: Status 404 returned error can't find the container with id 70f572f3ebc31e0af2a765dc2cdc69d7d2132e96b62c1db0aa046c2bf06d25c2 Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633379 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633429 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633450 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/00782f1b-8777-480e-bb85-1a4cafb77cee-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633472 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633512 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-948hq\" (UniqueName: \"kubernetes.io/projected/00782f1b-8777-480e-bb85-1a4cafb77cee-kube-api-access-948hq\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633546 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/00782f1b-8777-480e-bb85-1a4cafb77cee-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633600 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/00782f1b-8777-480e-bb85-1a4cafb77cee-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633627 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/00782f1b-8777-480e-bb85-1a4cafb77cee-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.633659 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.634194 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.634305 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.634630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/00782f1b-8777-480e-bb85-1a4cafb77cee-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.634868 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/00782f1b-8777-480e-bb85-1a4cafb77cee-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.637003 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/00782f1b-8777-480e-bb85-1a4cafb77cee-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.637315 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/00782f1b-8777-480e-bb85-1a4cafb77cee-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.637766 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/00782f1b-8777-480e-bb85-1a4cafb77cee-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.639160 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.639201 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1117efca265b3507eb58d954574e4372ae58838f2807435990a8e560db25e330/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.650808 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-948hq\" (UniqueName: \"kubernetes.io/projected/00782f1b-8777-480e-bb85-1a4cafb77cee-kube-api-access-948hq\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.678863 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-959aea4b-829b-4af6-be73-7622f87f33b3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-959aea4b-829b-4af6-be73-7622f87f33b3\") pod \"rabbitmq-cell1-server-0\" (UID: \"00782f1b-8777-480e-bb85-1a4cafb77cee\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.697118 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81d9d69a-df2c-45b6-aced-2a41a9f91c67" path="/var/lib/kubelet/pods/81d9d69a-df2c-45b6-aced-2a41a9f91c67/volumes" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.697809 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa56250e-f38b-4f64-ad71-325c78aafda3" path="/var/lib/kubelet/pods/fa56250e-f38b-4f64-ad71-325c78aafda3/volumes" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.801117 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.838002 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-dns-svc\") pod \"9c99def4-9ef7-4685-9258-1cc1256967c2\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.838378 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhgb2\" (UniqueName: \"kubernetes.io/projected/9c99def4-9ef7-4685-9258-1cc1256967c2-kube-api-access-hhgb2\") pod \"9c99def4-9ef7-4685-9258-1cc1256967c2\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.838471 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-config\") pod \"9c99def4-9ef7-4685-9258-1cc1256967c2\" (UID: \"9c99def4-9ef7-4685-9258-1cc1256967c2\") " Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.845672 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.849331 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c99def4-9ef7-4685-9258-1cc1256967c2-kube-api-access-hhgb2" (OuterVolumeSpecName: "kube-api-access-hhgb2") pod "9c99def4-9ef7-4685-9258-1cc1256967c2" (UID: "9c99def4-9ef7-4685-9258-1cc1256967c2"). InnerVolumeSpecName "kube-api-access-hhgb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.881485 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-config" (OuterVolumeSpecName: "config") pod "9c99def4-9ef7-4685-9258-1cc1256967c2" (UID: "9c99def4-9ef7-4685-9258-1cc1256967c2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.882867 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9c99def4-9ef7-4685-9258-1cc1256967c2" (UID: "9c99def4-9ef7-4685-9258-1cc1256967c2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.939868 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.939898 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9c99def4-9ef7-4685-9258-1cc1256967c2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:34 crc kubenswrapper[4884]: I1128 16:42:34.939911 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhgb2\" (UniqueName: \"kubernetes.io/projected/9c99def4-9ef7-4685-9258-1cc1256967c2-kube-api-access-hhgb2\") on node \"crc\" DevicePath \"\"" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.257275 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 16:42:35 crc kubenswrapper[4884]: W1128 16:42:35.351671 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00782f1b_8777_480e_bb85_1a4cafb77cee.slice/crio-9f45f62159484de262752113a3178fb8fc410a3d4b07ff1ed27dbae31d9fe233 WatchSource:0}: Error finding container 9f45f62159484de262752113a3178fb8fc410a3d4b07ff1ed27dbae31d9fe233: Status 404 returned error can't find the container with id 9f45f62159484de262752113a3178fb8fc410a3d4b07ff1ed27dbae31d9fe233 Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.428323 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"00782f1b-8777-480e-bb85-1a4cafb77cee","Type":"ContainerStarted","Data":"9f45f62159484de262752113a3178fb8fc410a3d4b07ff1ed27dbae31d9fe233"} Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.432297 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b9c35913-c5bf-4459-8252-a5ba99fb302b","Type":"ContainerStarted","Data":"70f572f3ebc31e0af2a765dc2cdc69d7d2132e96b62c1db0aa046c2bf06d25c2"} Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.434078 4884 generic.go:334] "Generic (PLEG): container finished" podID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerID="dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139" exitCode=0 Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.434134 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" event={"ID":"9c99def4-9ef7-4685-9258-1cc1256967c2","Type":"ContainerDied","Data":"dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139"} Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.434156 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" event={"ID":"9c99def4-9ef7-4685-9258-1cc1256967c2","Type":"ContainerDied","Data":"1a7c2f9599341fc9dd6570b82b5922df1b9ed132b82ad961c1dfd4770795591c"} Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.434174 4884 scope.go:117] "RemoveContainer" containerID="dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.434193 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-hbgxz" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.471941 4884 scope.go:117] "RemoveContainer" containerID="49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.481224 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-hbgxz"] Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.486716 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-hbgxz"] Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.567235 4884 scope.go:117] "RemoveContainer" containerID="dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139" Nov 28 16:42:35 crc kubenswrapper[4884]: E1128 16:42:35.567886 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139\": container with ID starting with dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139 not found: ID does not exist" containerID="dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.567962 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139"} err="failed to get container status \"dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139\": rpc error: code = NotFound desc = could not find container \"dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139\": container with ID starting with dba4a6409d050ff6d2793c37cbe86d6e239f74967a0e52e578579dac5c827139 not found: ID does not exist" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.568007 4884 scope.go:117] "RemoveContainer" containerID="49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655" Nov 28 16:42:35 crc kubenswrapper[4884]: E1128 16:42:35.568635 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655\": container with ID starting with 49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655 not found: ID does not exist" containerID="49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655" Nov 28 16:42:35 crc kubenswrapper[4884]: I1128 16:42:35.568666 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655"} err="failed to get container status \"49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655\": rpc error: code = NotFound desc = could not find container \"49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655\": container with ID starting with 49546dfd0dbea31f730c70fc4d2067600e4c0b7037483beb436409fc1231f655 not found: ID does not exist" Nov 28 16:42:36 crc kubenswrapper[4884]: I1128 16:42:36.455626 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b9c35913-c5bf-4459-8252-a5ba99fb302b","Type":"ContainerStarted","Data":"07ba3a8966462bc96fb223dae710121fac837a811620d1f73731b0db9366e003"} Nov 28 16:42:36 crc kubenswrapper[4884]: I1128 16:42:36.704809 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" 
path="/var/lib/kubelet/pods/9c99def4-9ef7-4685-9258-1cc1256967c2/volumes" Nov 28 16:42:37 crc kubenswrapper[4884]: I1128 16:42:37.467177 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"00782f1b-8777-480e-bb85-1a4cafb77cee","Type":"ContainerStarted","Data":"9e5cf36141118d7c3e4d7b565158bae83911b1dc08af444c265394e2566b0c2e"} Nov 28 16:43:08 crc kubenswrapper[4884]: I1128 16:43:08.752892 4884 generic.go:334] "Generic (PLEG): container finished" podID="b9c35913-c5bf-4459-8252-a5ba99fb302b" containerID="07ba3a8966462bc96fb223dae710121fac837a811620d1f73731b0db9366e003" exitCode=0 Nov 28 16:43:08 crc kubenswrapper[4884]: I1128 16:43:08.753012 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b9c35913-c5bf-4459-8252-a5ba99fb302b","Type":"ContainerDied","Data":"07ba3a8966462bc96fb223dae710121fac837a811620d1f73731b0db9366e003"} Nov 28 16:43:09 crc kubenswrapper[4884]: I1128 16:43:09.762837 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"b9c35913-c5bf-4459-8252-a5ba99fb302b","Type":"ContainerStarted","Data":"6b9669fad11e9b38f6be11d0614002ccef39d55448c94b9ffa90f35c94a7482e"} Nov 28 16:43:09 crc kubenswrapper[4884]: I1128 16:43:09.764266 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 16:43:09 crc kubenswrapper[4884]: I1128 16:43:09.765781 4884 generic.go:334] "Generic (PLEG): container finished" podID="00782f1b-8777-480e-bb85-1a4cafb77cee" containerID="9e5cf36141118d7c3e4d7b565158bae83911b1dc08af444c265394e2566b0c2e" exitCode=0 Nov 28 16:43:09 crc kubenswrapper[4884]: I1128 16:43:09.765840 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"00782f1b-8777-480e-bb85-1a4cafb77cee","Type":"ContainerDied","Data":"9e5cf36141118d7c3e4d7b565158bae83911b1dc08af444c265394e2566b0c2e"} Nov 28 16:43:09 crc kubenswrapper[4884]: I1128 16:43:09.802934 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.802915714 podStartE2EDuration="36.802915714s" podCreationTimestamp="2025-11-28 16:42:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:43:09.79463543 +0000 UTC m=+5029.357419241" watchObservedRunningTime="2025-11-28 16:43:09.802915714 +0000 UTC m=+5029.365699515" Nov 28 16:43:10 crc kubenswrapper[4884]: I1128 16:43:10.776057 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"00782f1b-8777-480e-bb85-1a4cafb77cee","Type":"ContainerStarted","Data":"6cd406f72e50844f84941647f6adfc9373db0cc547a4bb141cc251d1c9050dc9"} Nov 28 16:43:10 crc kubenswrapper[4884]: I1128 16:43:10.808888 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.808862573 podStartE2EDuration="36.808862573s" podCreationTimestamp="2025-11-28 16:42:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:43:10.800794336 +0000 UTC m=+5030.363578137" watchObservedRunningTime="2025-11-28 16:43:10.808862573 +0000 UTC m=+5030.371646394" Nov 28 16:43:14 crc kubenswrapper[4884]: I1128 16:43:14.847113 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:43:21 crc kubenswrapper[4884]: I1128 16:43:21.243656 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:43:21 crc kubenswrapper[4884]: I1128 16:43:21.244601 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:43:24 crc kubenswrapper[4884]: I1128 16:43:24.112243 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 16:43:24 crc kubenswrapper[4884]: I1128 16:43:24.848319 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.396766 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 16:43:36 crc kubenswrapper[4884]: E1128 16:43:36.398122 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerName="dnsmasq-dns" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.398144 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerName="dnsmasq-dns" Nov 28 16:43:36 crc kubenswrapper[4884]: E1128 16:43:36.398167 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerName="init" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.398178 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerName="init" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.398425 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c99def4-9ef7-4685-9258-1cc1256967c2" containerName="dnsmasq-dns" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.399193 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.402740 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2rn6m" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.408279 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.425206 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t88c\" (UniqueName: \"kubernetes.io/projected/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a-kube-api-access-2t88c\") pod \"mariadb-client-1-default\" (UID: \"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a\") " pod="openstack/mariadb-client-1-default" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.526454 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t88c\" (UniqueName: \"kubernetes.io/projected/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a-kube-api-access-2t88c\") pod \"mariadb-client-1-default\" (UID: \"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a\") " pod="openstack/mariadb-client-1-default" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.557080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t88c\" (UniqueName: \"kubernetes.io/projected/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a-kube-api-access-2t88c\") pod \"mariadb-client-1-default\" (UID: \"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a\") " pod="openstack/mariadb-client-1-default" Nov 28 16:43:36 crc kubenswrapper[4884]: I1128 16:43:36.722833 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 28 16:43:37 crc kubenswrapper[4884]: I1128 16:43:37.241394 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 16:43:38 crc kubenswrapper[4884]: I1128 16:43:38.021822 4884 generic.go:334] "Generic (PLEG): container finished" podID="18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a" containerID="73c47078a07737ca8138397208886f8fb6add579c2d45eec3d0b4f356df1c0eb" exitCode=0 Nov 28 16:43:38 crc kubenswrapper[4884]: I1128 16:43:38.022007 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a","Type":"ContainerDied","Data":"73c47078a07737ca8138397208886f8fb6add579c2d45eec3d0b4f356df1c0eb"} Nov 28 16:43:38 crc kubenswrapper[4884]: I1128 16:43:38.022115 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a","Type":"ContainerStarted","Data":"1554d78b73668fb17a6da59fd5d23cb3edc7ed3c1eade35b6a8687313a7eaeb3"} Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.463524 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.487785 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a/mariadb-client-1-default/0.log" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.511383 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.517054 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.572140 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2t88c\" (UniqueName: \"kubernetes.io/projected/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a-kube-api-access-2t88c\") pod \"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a\" (UID: \"18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a\") " Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.577269 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a-kube-api-access-2t88c" (OuterVolumeSpecName: "kube-api-access-2t88c") pod "18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a" (UID: "18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a"). InnerVolumeSpecName "kube-api-access-2t88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.674999 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2t88c\" (UniqueName: \"kubernetes.io/projected/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a-kube-api-access-2t88c\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.931475 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 16:43:39 crc kubenswrapper[4884]: E1128 16:43:39.932281 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a" containerName="mariadb-client-1-default" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.932303 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a" containerName="mariadb-client-1-default" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.932532 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a" containerName="mariadb-client-1-default" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.933179 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.943170 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 16:43:39 crc kubenswrapper[4884]: I1128 16:43:39.980380 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z6s9\" (UniqueName: \"kubernetes.io/projected/85defae5-c0b4-45bc-8d81-4ff06846102a-kube-api-access-7z6s9\") pod \"mariadb-client-2-default\" (UID: \"85defae5-c0b4-45bc-8d81-4ff06846102a\") " pod="openstack/mariadb-client-2-default" Nov 28 16:43:40 crc kubenswrapper[4884]: I1128 16:43:40.039564 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1554d78b73668fb17a6da59fd5d23cb3edc7ed3c1eade35b6a8687313a7eaeb3" Nov 28 16:43:40 crc kubenswrapper[4884]: I1128 16:43:40.039627 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 28 16:43:40 crc kubenswrapper[4884]: I1128 16:43:40.082565 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z6s9\" (UniqueName: \"kubernetes.io/projected/85defae5-c0b4-45bc-8d81-4ff06846102a-kube-api-access-7z6s9\") pod \"mariadb-client-2-default\" (UID: \"85defae5-c0b4-45bc-8d81-4ff06846102a\") " pod="openstack/mariadb-client-2-default" Nov 28 16:43:40 crc kubenswrapper[4884]: I1128 16:43:40.105971 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z6s9\" (UniqueName: \"kubernetes.io/projected/85defae5-c0b4-45bc-8d81-4ff06846102a-kube-api-access-7z6s9\") pod \"mariadb-client-2-default\" (UID: \"85defae5-c0b4-45bc-8d81-4ff06846102a\") " pod="openstack/mariadb-client-2-default" Nov 28 16:43:40 crc kubenswrapper[4884]: I1128 16:43:40.260747 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 16:43:41 crc kubenswrapper[4884]: I1128 16:43:40.698486 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a" path="/var/lib/kubelet/pods/18dcd4a5-7bfa-4d6f-97a3-b0ce9e9edf9a/volumes" Nov 28 16:43:41 crc kubenswrapper[4884]: I1128 16:43:41.760076 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 16:43:42 crc kubenswrapper[4884]: I1128 16:43:42.056340 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"85defae5-c0b4-45bc-8d81-4ff06846102a","Type":"ContainerStarted","Data":"6faa083a6f565b5783d2d3999112edcbe904f8ccad2f06d27c2980a3e7b405ca"} Nov 28 16:43:42 crc kubenswrapper[4884]: I1128 16:43:42.056838 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"85defae5-c0b4-45bc-8d81-4ff06846102a","Type":"ContainerStarted","Data":"4f8f4441764f6267bae7c04079e695491a4ca4cb17760f77c59d21c09168b14c"} Nov 28 16:43:42 crc kubenswrapper[4884]: I1128 16:43:42.074476 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-2-default" podStartSLOduration=3.07445422 podStartE2EDuration="3.07445422s" podCreationTimestamp="2025-11-28 16:43:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:43:42.069298033 +0000 UTC m=+5061.632081834" watchObservedRunningTime="2025-11-28 16:43:42.07445422 +0000 UTC m=+5061.637238021" Nov 28 16:43:43 crc kubenswrapper[4884]: I1128 16:43:43.066797 4884 generic.go:334] "Generic (PLEG): container finished" podID="85defae5-c0b4-45bc-8d81-4ff06846102a" containerID="6faa083a6f565b5783d2d3999112edcbe904f8ccad2f06d27c2980a3e7b405ca" exitCode=1 Nov 28 16:43:43 crc kubenswrapper[4884]: I1128 16:43:43.066880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"85defae5-c0b4-45bc-8d81-4ff06846102a","Type":"ContainerDied","Data":"6faa083a6f565b5783d2d3999112edcbe904f8ccad2f06d27c2980a3e7b405ca"} Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.450466 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.491392 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.499891 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.652151 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z6s9\" (UniqueName: \"kubernetes.io/projected/85defae5-c0b4-45bc-8d81-4ff06846102a-kube-api-access-7z6s9\") pod \"85defae5-c0b4-45bc-8d81-4ff06846102a\" (UID: \"85defae5-c0b4-45bc-8d81-4ff06846102a\") " Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.658167 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85defae5-c0b4-45bc-8d81-4ff06846102a-kube-api-access-7z6s9" (OuterVolumeSpecName: "kube-api-access-7z6s9") pod "85defae5-c0b4-45bc-8d81-4ff06846102a" (UID: "85defae5-c0b4-45bc-8d81-4ff06846102a"). InnerVolumeSpecName "kube-api-access-7z6s9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.697794 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85defae5-c0b4-45bc-8d81-4ff06846102a" path="/var/lib/kubelet/pods/85defae5-c0b4-45bc-8d81-4ff06846102a/volumes" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.756785 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z6s9\" (UniqueName: \"kubernetes.io/projected/85defae5-c0b4-45bc-8d81-4ff06846102a-kube-api-access-7z6s9\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.956663 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Nov 28 16:43:44 crc kubenswrapper[4884]: E1128 16:43:44.957024 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85defae5-c0b4-45bc-8d81-4ff06846102a" containerName="mariadb-client-2-default" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.957040 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="85defae5-c0b4-45bc-8d81-4ff06846102a" containerName="mariadb-client-2-default" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.957282 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="85defae5-c0b4-45bc-8d81-4ff06846102a" containerName="mariadb-client-2-default" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.957863 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 16:43:44 crc kubenswrapper[4884]: I1128 16:43:44.967992 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.059407 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bknb4\" (UniqueName: \"kubernetes.io/projected/66574cf1-9c56-4894-978d-72441ce519df-kube-api-access-bknb4\") pod \"mariadb-client-1\" (UID: \"66574cf1-9c56-4894-978d-72441ce519df\") " pod="openstack/mariadb-client-1" Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.088311 4884 scope.go:117] "RemoveContainer" containerID="6faa083a6f565b5783d2d3999112edcbe904f8ccad2f06d27c2980a3e7b405ca" Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.088332 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.160937 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bknb4\" (UniqueName: \"kubernetes.io/projected/66574cf1-9c56-4894-978d-72441ce519df-kube-api-access-bknb4\") pod \"mariadb-client-1\" (UID: \"66574cf1-9c56-4894-978d-72441ce519df\") " pod="openstack/mariadb-client-1" Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.176958 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bknb4\" (UniqueName: \"kubernetes.io/projected/66574cf1-9c56-4894-978d-72441ce519df-kube-api-access-bknb4\") pod \"mariadb-client-1\" (UID: \"66574cf1-9c56-4894-978d-72441ce519df\") " pod="openstack/mariadb-client-1" Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.278977 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 16:43:45 crc kubenswrapper[4884]: I1128 16:43:45.780163 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 16:43:45 crc kubenswrapper[4884]: W1128 16:43:45.783803 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66574cf1_9c56_4894_978d_72441ce519df.slice/crio-87b064150bc9576c408b3cf54d44fd7a72a69f5e13f4c64d31df0fabb5b5046b WatchSource:0}: Error finding container 87b064150bc9576c408b3cf54d44fd7a72a69f5e13f4c64d31df0fabb5b5046b: Status 404 returned error can't find the container with id 87b064150bc9576c408b3cf54d44fd7a72a69f5e13f4c64d31df0fabb5b5046b Nov 28 16:43:46 crc kubenswrapper[4884]: I1128 16:43:46.102227 4884 generic.go:334] "Generic (PLEG): container finished" podID="66574cf1-9c56-4894-978d-72441ce519df" containerID="17423c1cd46a1da152bb91106b9cec01a1d8b43a02a6861f0b5054d6f22d3574" exitCode=0 Nov 28 16:43:46 crc kubenswrapper[4884]: I1128 16:43:46.102337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"66574cf1-9c56-4894-978d-72441ce519df","Type":"ContainerDied","Data":"17423c1cd46a1da152bb91106b9cec01a1d8b43a02a6861f0b5054d6f22d3574"} Nov 28 16:43:46 crc kubenswrapper[4884]: I1128 16:43:46.102857 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"66574cf1-9c56-4894-978d-72441ce519df","Type":"ContainerStarted","Data":"87b064150bc9576c408b3cf54d44fd7a72a69f5e13f4c64d31df0fabb5b5046b"} Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.491750 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.506869 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_66574cf1-9c56-4894-978d-72441ce519df/mariadb-client-1/0.log" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.530353 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.535767 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.605870 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bknb4\" (UniqueName: \"kubernetes.io/projected/66574cf1-9c56-4894-978d-72441ce519df-kube-api-access-bknb4\") pod \"66574cf1-9c56-4894-978d-72441ce519df\" (UID: \"66574cf1-9c56-4894-978d-72441ce519df\") " Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.612909 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66574cf1-9c56-4894-978d-72441ce519df-kube-api-access-bknb4" (OuterVolumeSpecName: "kube-api-access-bknb4") pod "66574cf1-9c56-4894-978d-72441ce519df" (UID: "66574cf1-9c56-4894-978d-72441ce519df"). InnerVolumeSpecName "kube-api-access-bknb4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.710383 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bknb4\" (UniqueName: \"kubernetes.io/projected/66574cf1-9c56-4894-978d-72441ce519df-kube-api-access-bknb4\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.937942 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 16:43:47 crc kubenswrapper[4884]: E1128 16:43:47.938742 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66574cf1-9c56-4894-978d-72441ce519df" containerName="mariadb-client-1" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.938987 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="66574cf1-9c56-4894-978d-72441ce519df" containerName="mariadb-client-1" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.939465 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="66574cf1-9c56-4894-978d-72441ce519df" containerName="mariadb-client-1" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.940698 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 16:43:47 crc kubenswrapper[4884]: I1128 16:43:47.952777 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.116151 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rgm5\" (UniqueName: \"kubernetes.io/projected/a5ba56a3-af27-49c2-ad9e-fa0448157763-kube-api-access-6rgm5\") pod \"mariadb-client-4-default\" (UID: \"a5ba56a3-af27-49c2-ad9e-fa0448157763\") " pod="openstack/mariadb-client-4-default" Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.122526 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87b064150bc9576c408b3cf54d44fd7a72a69f5e13f4c64d31df0fabb5b5046b" Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.122850 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.219306 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rgm5\" (UniqueName: \"kubernetes.io/projected/a5ba56a3-af27-49c2-ad9e-fa0448157763-kube-api-access-6rgm5\") pod \"mariadb-client-4-default\" (UID: \"a5ba56a3-af27-49c2-ad9e-fa0448157763\") " pod="openstack/mariadb-client-4-default" Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.238118 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rgm5\" (UniqueName: \"kubernetes.io/projected/a5ba56a3-af27-49c2-ad9e-fa0448157763-kube-api-access-6rgm5\") pod \"mariadb-client-4-default\" (UID: \"a5ba56a3-af27-49c2-ad9e-fa0448157763\") " pod="openstack/mariadb-client-4-default" Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.266405 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 16:43:48 crc kubenswrapper[4884]: W1128 16:43:48.528504 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5ba56a3_af27_49c2_ad9e_fa0448157763.slice/crio-13a385afca2e91772b6f4e5154bd99d13252f8e3b75d0de001b89489c2b19de6 WatchSource:0}: Error finding container 13a385afca2e91772b6f4e5154bd99d13252f8e3b75d0de001b89489c2b19de6: Status 404 returned error can't find the container with id 13a385afca2e91772b6f4e5154bd99d13252f8e3b75d0de001b89489c2b19de6 Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.528570 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 16:43:48 crc kubenswrapper[4884]: I1128 16:43:48.700729 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66574cf1-9c56-4894-978d-72441ce519df" path="/var/lib/kubelet/pods/66574cf1-9c56-4894-978d-72441ce519df/volumes" Nov 28 16:43:49 crc kubenswrapper[4884]: I1128 16:43:49.135083 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5ba56a3-af27-49c2-ad9e-fa0448157763" containerID="5da7116ab523242f97b6957ad88f99608e4fecd26041fdde3a1b4032d49c0acb" exitCode=0 Nov 28 16:43:49 crc kubenswrapper[4884]: I1128 16:43:49.135181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"a5ba56a3-af27-49c2-ad9e-fa0448157763","Type":"ContainerDied","Data":"5da7116ab523242f97b6957ad88f99608e4fecd26041fdde3a1b4032d49c0acb"} Nov 28 16:43:49 crc kubenswrapper[4884]: I1128 16:43:49.135223 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"a5ba56a3-af27-49c2-ad9e-fa0448157763","Type":"ContainerStarted","Data":"13a385afca2e91772b6f4e5154bd99d13252f8e3b75d0de001b89489c2b19de6"} Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.535250 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.553990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rgm5\" (UniqueName: \"kubernetes.io/projected/a5ba56a3-af27-49c2-ad9e-fa0448157763-kube-api-access-6rgm5\") pod \"a5ba56a3-af27-49c2-ad9e-fa0448157763\" (UID: \"a5ba56a3-af27-49c2-ad9e-fa0448157763\") " Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.555310 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_a5ba56a3-af27-49c2-ad9e-fa0448157763/mariadb-client-4-default/0.log" Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.562083 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5ba56a3-af27-49c2-ad9e-fa0448157763-kube-api-access-6rgm5" (OuterVolumeSpecName: "kube-api-access-6rgm5") pod "a5ba56a3-af27-49c2-ad9e-fa0448157763" (UID: "a5ba56a3-af27-49c2-ad9e-fa0448157763"). InnerVolumeSpecName "kube-api-access-6rgm5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.580652 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.595247 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.656399 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rgm5\" (UniqueName: \"kubernetes.io/projected/a5ba56a3-af27-49c2-ad9e-fa0448157763-kube-api-access-6rgm5\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:50 crc kubenswrapper[4884]: I1128 16:43:50.701665 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5ba56a3-af27-49c2-ad9e-fa0448157763" path="/var/lib/kubelet/pods/a5ba56a3-af27-49c2-ad9e-fa0448157763/volumes" Nov 28 16:43:51 crc kubenswrapper[4884]: I1128 16:43:51.160301 4884 scope.go:117] "RemoveContainer" containerID="5da7116ab523242f97b6957ad88f99608e4fecd26041fdde3a1b4032d49c0acb" Nov 28 16:43:51 crc kubenswrapper[4884]: I1128 16:43:51.160323 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 16:43:51 crc kubenswrapper[4884]: I1128 16:43:51.243083 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:43:51 crc kubenswrapper[4884]: I1128 16:43:51.243338 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.453018 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 16:43:54 crc kubenswrapper[4884]: E1128 16:43:54.453996 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ba56a3-af27-49c2-ad9e-fa0448157763" containerName="mariadb-client-4-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.454014 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ba56a3-af27-49c2-ad9e-fa0448157763" containerName="mariadb-client-4-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.454192 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5ba56a3-af27-49c2-ad9e-fa0448157763" containerName="mariadb-client-4-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.454730 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.457077 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2rn6m" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.460978 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.619483 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bf5h2\" (UniqueName: \"kubernetes.io/projected/fc29ca98-259a-4d43-9037-b4e606c7be23-kube-api-access-bf5h2\") pod \"mariadb-client-5-default\" (UID: \"fc29ca98-259a-4d43-9037-b4e606c7be23\") " pod="openstack/mariadb-client-5-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.720849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bf5h2\" (UniqueName: \"kubernetes.io/projected/fc29ca98-259a-4d43-9037-b4e606c7be23-kube-api-access-bf5h2\") pod \"mariadb-client-5-default\" (UID: \"fc29ca98-259a-4d43-9037-b4e606c7be23\") " pod="openstack/mariadb-client-5-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.739033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bf5h2\" (UniqueName: \"kubernetes.io/projected/fc29ca98-259a-4d43-9037-b4e606c7be23-kube-api-access-bf5h2\") pod \"mariadb-client-5-default\" (UID: \"fc29ca98-259a-4d43-9037-b4e606c7be23\") " pod="openstack/mariadb-client-5-default" Nov 28 16:43:54 crc kubenswrapper[4884]: I1128 16:43:54.817785 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 16:43:55 crc kubenswrapper[4884]: I1128 16:43:55.308117 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 16:43:56 crc kubenswrapper[4884]: I1128 16:43:56.205871 4884 generic.go:334] "Generic (PLEG): container finished" podID="fc29ca98-259a-4d43-9037-b4e606c7be23" containerID="c0ca10b0cf5fafc66a0acfe38de6b08006d77ab173bd243c4cf1d504db70b7d7" exitCode=0 Nov 28 16:43:56 crc kubenswrapper[4884]: I1128 16:43:56.205984 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"fc29ca98-259a-4d43-9037-b4e606c7be23","Type":"ContainerDied","Data":"c0ca10b0cf5fafc66a0acfe38de6b08006d77ab173bd243c4cf1d504db70b7d7"} Nov 28 16:43:56 crc kubenswrapper[4884]: I1128 16:43:56.206253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"fc29ca98-259a-4d43-9037-b4e606c7be23","Type":"ContainerStarted","Data":"70905f26854cab225ba3f0c498534e228682cb909825f9a161c8bfe3a8a65e10"} Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.556464 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.574539 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_fc29ca98-259a-4d43-9037-b4e606c7be23/mariadb-client-5-default/0.log" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.596844 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.602655 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.664321 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf5h2\" (UniqueName: \"kubernetes.io/projected/fc29ca98-259a-4d43-9037-b4e606c7be23-kube-api-access-bf5h2\") pod \"fc29ca98-259a-4d43-9037-b4e606c7be23\" (UID: \"fc29ca98-259a-4d43-9037-b4e606c7be23\") " Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.670431 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc29ca98-259a-4d43-9037-b4e606c7be23-kube-api-access-bf5h2" (OuterVolumeSpecName: "kube-api-access-bf5h2") pod "fc29ca98-259a-4d43-9037-b4e606c7be23" (UID: "fc29ca98-259a-4d43-9037-b4e606c7be23"). InnerVolumeSpecName "kube-api-access-bf5h2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.707897 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 16:43:57 crc kubenswrapper[4884]: E1128 16:43:57.708237 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc29ca98-259a-4d43-9037-b4e606c7be23" containerName="mariadb-client-5-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.708255 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc29ca98-259a-4d43-9037-b4e606c7be23" containerName="mariadb-client-5-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.708395 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc29ca98-259a-4d43-9037-b4e606c7be23" containerName="mariadb-client-5-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.708943 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.725244 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.765958 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf5h2\" (UniqueName: \"kubernetes.io/projected/fc29ca98-259a-4d43-9037-b4e606c7be23-kube-api-access-bf5h2\") on node \"crc\" DevicePath \"\"" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.867998 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5vf9\" (UniqueName: \"kubernetes.io/projected/02c7a488-4b80-41fd-a8f3-08c1870f93ac-kube-api-access-v5vf9\") pod \"mariadb-client-6-default\" (UID: \"02c7a488-4b80-41fd-a8f3-08c1870f93ac\") " pod="openstack/mariadb-client-6-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.969697 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5vf9\" (UniqueName: \"kubernetes.io/projected/02c7a488-4b80-41fd-a8f3-08c1870f93ac-kube-api-access-v5vf9\") pod \"mariadb-client-6-default\" (UID: \"02c7a488-4b80-41fd-a8f3-08c1870f93ac\") " pod="openstack/mariadb-client-6-default" Nov 28 16:43:57 crc kubenswrapper[4884]: I1128 16:43:57.997541 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5vf9\" (UniqueName: \"kubernetes.io/projected/02c7a488-4b80-41fd-a8f3-08c1870f93ac-kube-api-access-v5vf9\") pod \"mariadb-client-6-default\" (UID: \"02c7a488-4b80-41fd-a8f3-08c1870f93ac\") " pod="openstack/mariadb-client-6-default" Nov 28 16:43:58 crc kubenswrapper[4884]: I1128 16:43:58.032328 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 16:43:58 crc kubenswrapper[4884]: I1128 16:43:58.225215 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70905f26854cab225ba3f0c498534e228682cb909825f9a161c8bfe3a8a65e10" Nov 28 16:43:58 crc kubenswrapper[4884]: I1128 16:43:58.225455 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 16:43:58 crc kubenswrapper[4884]: I1128 16:43:58.540041 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 16:43:58 crc kubenswrapper[4884]: I1128 16:43:58.696735 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc29ca98-259a-4d43-9037-b4e606c7be23" path="/var/lib/kubelet/pods/fc29ca98-259a-4d43-9037-b4e606c7be23/volumes" Nov 28 16:43:59 crc kubenswrapper[4884]: I1128 16:43:59.232857 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"02c7a488-4b80-41fd-a8f3-08c1870f93ac","Type":"ContainerStarted","Data":"f179e1434cfb151c75c1805f811f29fb70b5cced41f1dc52764a6e7cffbc1ea5"} Nov 28 16:43:59 crc kubenswrapper[4884]: I1128 16:43:59.232906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"02c7a488-4b80-41fd-a8f3-08c1870f93ac","Type":"ContainerStarted","Data":"4df23cd00e8d765ad2d00e6cc3206a67ac34d55ff92edca1a4d8a2817cc337d7"} Nov 28 16:43:59 crc kubenswrapper[4884]: I1128 16:43:59.245874 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=2.245852965 podStartE2EDuration="2.245852965s" podCreationTimestamp="2025-11-28 16:43:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:43:59.24317802 +0000 UTC m=+5078.805961821" watchObservedRunningTime="2025-11-28 16:43:59.245852965 +0000 UTC m=+5078.808636766" Nov 28 16:43:59 crc kubenswrapper[4884]: I1128 16:43:59.320944 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-6-default_02c7a488-4b80-41fd-a8f3-08c1870f93ac/mariadb-client-6-default/0.log" Nov 28 16:44:00 crc kubenswrapper[4884]: I1128 16:44:00.242542 4884 generic.go:334] "Generic (PLEG): container finished" podID="02c7a488-4b80-41fd-a8f3-08c1870f93ac" containerID="f179e1434cfb151c75c1805f811f29fb70b5cced41f1dc52764a6e7cffbc1ea5" exitCode=1 Nov 28 16:44:00 crc kubenswrapper[4884]: I1128 16:44:00.242597 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"02c7a488-4b80-41fd-a8f3-08c1870f93ac","Type":"ContainerDied","Data":"f179e1434cfb151c75c1805f811f29fb70b5cced41f1dc52764a6e7cffbc1ea5"} Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.596695 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.630267 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.636352 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.728550 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5vf9\" (UniqueName: \"kubernetes.io/projected/02c7a488-4b80-41fd-a8f3-08c1870f93ac-kube-api-access-v5vf9\") pod \"02c7a488-4b80-41fd-a8f3-08c1870f93ac\" (UID: \"02c7a488-4b80-41fd-a8f3-08c1870f93ac\") " Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.734064 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02c7a488-4b80-41fd-a8f3-08c1870f93ac-kube-api-access-v5vf9" (OuterVolumeSpecName: "kube-api-access-v5vf9") pod "02c7a488-4b80-41fd-a8f3-08c1870f93ac" (UID: "02c7a488-4b80-41fd-a8f3-08c1870f93ac"). InnerVolumeSpecName "kube-api-access-v5vf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.755172 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 16:44:01 crc kubenswrapper[4884]: E1128 16:44:01.755580 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02c7a488-4b80-41fd-a8f3-08c1870f93ac" containerName="mariadb-client-6-default" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.755604 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="02c7a488-4b80-41fd-a8f3-08c1870f93ac" containerName="mariadb-client-6-default" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.755794 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="02c7a488-4b80-41fd-a8f3-08c1870f93ac" containerName="mariadb-client-6-default" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.756449 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.763001 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.830953 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5vf9\" (UniqueName: \"kubernetes.io/projected/02c7a488-4b80-41fd-a8f3-08c1870f93ac-kube-api-access-v5vf9\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:01 crc kubenswrapper[4884]: I1128 16:44:01.932700 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlbcn\" (UniqueName: \"kubernetes.io/projected/7a2bbe2c-a276-4fff-b238-b15c04e0e051-kube-api-access-mlbcn\") pod \"mariadb-client-7-default\" (UID: \"7a2bbe2c-a276-4fff-b238-b15c04e0e051\") " pod="openstack/mariadb-client-7-default" Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.034373 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlbcn\" (UniqueName: \"kubernetes.io/projected/7a2bbe2c-a276-4fff-b238-b15c04e0e051-kube-api-access-mlbcn\") pod \"mariadb-client-7-default\" (UID: \"7a2bbe2c-a276-4fff-b238-b15c04e0e051\") " pod="openstack/mariadb-client-7-default" Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.050686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlbcn\" (UniqueName: \"kubernetes.io/projected/7a2bbe2c-a276-4fff-b238-b15c04e0e051-kube-api-access-mlbcn\") pod \"mariadb-client-7-default\" (UID: \"7a2bbe2c-a276-4fff-b238-b15c04e0e051\") " pod="openstack/mariadb-client-7-default" Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.084176 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.260560 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4df23cd00e8d765ad2d00e6cc3206a67ac34d55ff92edca1a4d8a2817cc337d7" Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.260613 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.559613 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 16:44:02 crc kubenswrapper[4884]: I1128 16:44:02.697937 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02c7a488-4b80-41fd-a8f3-08c1870f93ac" path="/var/lib/kubelet/pods/02c7a488-4b80-41fd-a8f3-08c1870f93ac/volumes" Nov 28 16:44:03 crc kubenswrapper[4884]: I1128 16:44:03.268461 4884 generic.go:334] "Generic (PLEG): container finished" podID="7a2bbe2c-a276-4fff-b238-b15c04e0e051" containerID="1976f4104086b69cdd0b99e5be0cab7df2f6c84304c2156f73732e15f9235f68" exitCode=0 Nov 28 16:44:03 crc kubenswrapper[4884]: I1128 16:44:03.268534 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"7a2bbe2c-a276-4fff-b238-b15c04e0e051","Type":"ContainerDied","Data":"1976f4104086b69cdd0b99e5be0cab7df2f6c84304c2156f73732e15f9235f68"} Nov 28 16:44:03 crc kubenswrapper[4884]: I1128 16:44:03.268816 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"7a2bbe2c-a276-4fff-b238-b15c04e0e051","Type":"ContainerStarted","Data":"b4088b411a6179706931e0fdb05ded3caf4cedcb93838f75e547315ea39da32b"} Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.776416 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.795510 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_7a2bbe2c-a276-4fff-b238-b15c04e0e051/mariadb-client-7-default/0.log" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.824347 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.831929 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.884775 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlbcn\" (UniqueName: \"kubernetes.io/projected/7a2bbe2c-a276-4fff-b238-b15c04e0e051-kube-api-access-mlbcn\") pod \"7a2bbe2c-a276-4fff-b238-b15c04e0e051\" (UID: \"7a2bbe2c-a276-4fff-b238-b15c04e0e051\") " Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.889941 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a2bbe2c-a276-4fff-b238-b15c04e0e051-kube-api-access-mlbcn" (OuterVolumeSpecName: "kube-api-access-mlbcn") pod "7a2bbe2c-a276-4fff-b238-b15c04e0e051" (UID: "7a2bbe2c-a276-4fff-b238-b15c04e0e051"). InnerVolumeSpecName "kube-api-access-mlbcn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.950670 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 28 16:44:04 crc kubenswrapper[4884]: E1128 16:44:04.951155 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a2bbe2c-a276-4fff-b238-b15c04e0e051" containerName="mariadb-client-7-default" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.951182 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a2bbe2c-a276-4fff-b238-b15c04e0e051" containerName="mariadb-client-7-default" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.951362 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a2bbe2c-a276-4fff-b238-b15c04e0e051" containerName="mariadb-client-7-default" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.951923 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.956746 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 16:44:04 crc kubenswrapper[4884]: I1128 16:44:04.986346 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlbcn\" (UniqueName: \"kubernetes.io/projected/7a2bbe2c-a276-4fff-b238-b15c04e0e051-kube-api-access-mlbcn\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.087674 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb99f\" (UniqueName: \"kubernetes.io/projected/109be5f9-8808-407d-a88a-099f2ca5bfb4-kube-api-access-wb99f\") pod \"mariadb-client-2\" (UID: \"109be5f9-8808-407d-a88a-099f2ca5bfb4\") " pod="openstack/mariadb-client-2" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.188876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb99f\" (UniqueName: \"kubernetes.io/projected/109be5f9-8808-407d-a88a-099f2ca5bfb4-kube-api-access-wb99f\") pod \"mariadb-client-2\" (UID: \"109be5f9-8808-407d-a88a-099f2ca5bfb4\") " pod="openstack/mariadb-client-2" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.206300 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb99f\" (UniqueName: \"kubernetes.io/projected/109be5f9-8808-407d-a88a-099f2ca5bfb4-kube-api-access-wb99f\") pod \"mariadb-client-2\" (UID: \"109be5f9-8808-407d-a88a-099f2ca5bfb4\") " pod="openstack/mariadb-client-2" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.272728 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.323414 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4088b411a6179706931e0fdb05ded3caf4cedcb93838f75e547315ea39da32b" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.323479 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 16:44:05 crc kubenswrapper[4884]: I1128 16:44:05.780289 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 16:44:06 crc kubenswrapper[4884]: I1128 16:44:06.332697 4884 generic.go:334] "Generic (PLEG): container finished" podID="109be5f9-8808-407d-a88a-099f2ca5bfb4" containerID="412f6073d42f28ed9ba04594734f40f801bc5cb19bcc11b567a9fadf61e163ac" exitCode=0 Nov 28 16:44:06 crc kubenswrapper[4884]: I1128 16:44:06.332813 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"109be5f9-8808-407d-a88a-099f2ca5bfb4","Type":"ContainerDied","Data":"412f6073d42f28ed9ba04594734f40f801bc5cb19bcc11b567a9fadf61e163ac"} Nov 28 16:44:06 crc kubenswrapper[4884]: I1128 16:44:06.333128 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"109be5f9-8808-407d-a88a-099f2ca5bfb4","Type":"ContainerStarted","Data":"e9ef4a8fd6bf6953a06a26d954a39b8e836e4848c3fdceaa3bf6a87ca0422d6b"} Nov 28 16:44:06 crc kubenswrapper[4884]: I1128 16:44:06.698452 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a2bbe2c-a276-4fff-b238-b15c04e0e051" path="/var/lib/kubelet/pods/7a2bbe2c-a276-4fff-b238-b15c04e0e051/volumes" Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.703865 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.723925 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_109be5f9-8808-407d-a88a-099f2ca5bfb4/mariadb-client-2/0.log" Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.754431 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.762952 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.830959 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wb99f\" (UniqueName: \"kubernetes.io/projected/109be5f9-8808-407d-a88a-099f2ca5bfb4-kube-api-access-wb99f\") pod \"109be5f9-8808-407d-a88a-099f2ca5bfb4\" (UID: \"109be5f9-8808-407d-a88a-099f2ca5bfb4\") " Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.839124 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/109be5f9-8808-407d-a88a-099f2ca5bfb4-kube-api-access-wb99f" (OuterVolumeSpecName: "kube-api-access-wb99f") pod "109be5f9-8808-407d-a88a-099f2ca5bfb4" (UID: "109be5f9-8808-407d-a88a-099f2ca5bfb4"). InnerVolumeSpecName "kube-api-access-wb99f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:44:07 crc kubenswrapper[4884]: I1128 16:44:07.932878 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wb99f\" (UniqueName: \"kubernetes.io/projected/109be5f9-8808-407d-a88a-099f2ca5bfb4-kube-api-access-wb99f\") on node \"crc\" DevicePath \"\"" Nov 28 16:44:08 crc kubenswrapper[4884]: I1128 16:44:08.351132 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9ef4a8fd6bf6953a06a26d954a39b8e836e4848c3fdceaa3bf6a87ca0422d6b" Nov 28 16:44:08 crc kubenswrapper[4884]: I1128 16:44:08.351233 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 16:44:08 crc kubenswrapper[4884]: I1128 16:44:08.697511 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="109be5f9-8808-407d-a88a-099f2ca5bfb4" path="/var/lib/kubelet/pods/109be5f9-8808-407d-a88a-099f2ca5bfb4/volumes" Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.243081 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.243715 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.243784 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.244303 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.244354 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" gracePeriod=600 Nov 28 16:44:21 crc kubenswrapper[4884]: E1128 16:44:21.389402 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.466495 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" exitCode=0 Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.466547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"} Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.466582 4884 scope.go:117] "RemoveContainer" containerID="1d8ad494f9b340b7f3c7e5e1e5e1e5813f7995968d1a726033a4d884c453dacf" Nov 28 16:44:21 crc kubenswrapper[4884]: I1128 16:44:21.467073 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:44:21 crc 
kubenswrapper[4884]: E1128 16:44:21.467286 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:44:29 crc kubenswrapper[4884]: I1128 16:44:29.193692 4884 scope.go:117] "RemoveContainer" containerID="2ce63d25c8f5fce9d965f4d02571a22e8dfc773a4d721dbbca0948f9e85cc06a" Nov 28 16:44:32 crc kubenswrapper[4884]: I1128 16:44:32.689273 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:44:32 crc kubenswrapper[4884]: E1128 16:44:32.690018 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:44:46 crc kubenswrapper[4884]: I1128 16:44:46.688406 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:44:46 crc kubenswrapper[4884]: E1128 16:44:46.689451 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.435462 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s42h4"] Nov 28 16:44:54 crc kubenswrapper[4884]: E1128 16:44:54.436458 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="109be5f9-8808-407d-a88a-099f2ca5bfb4" containerName="mariadb-client-2" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.436479 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="109be5f9-8808-407d-a88a-099f2ca5bfb4" containerName="mariadb-client-2" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.436685 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="109be5f9-8808-407d-a88a-099f2ca5bfb4" containerName="mariadb-client-2" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.438154 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.445082 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s42h4"] Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.505862 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-utilities\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.506002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-catalog-content\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.506055 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2rvs\" (UniqueName: \"kubernetes.io/projected/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-kube-api-access-c2rvs\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.607137 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-utilities\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.607261 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-catalog-content\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.607300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2rvs\" (UniqueName: \"kubernetes.io/projected/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-kube-api-access-c2rvs\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.607683 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-catalog-content\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.607743 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-utilities\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.635905 4884 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-msnss"] Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.636923 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2rvs\" (UniqueName: \"kubernetes.io/projected/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-kube-api-access-c2rvs\") pod \"certified-operators-s42h4\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.637923 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.644586 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-msnss"] Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.708525 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-utilities\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.708667 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-catalog-content\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.708704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfvtc\" (UniqueName: \"kubernetes.io/projected/c943e890-073d-4862-bea8-899afe75491c-kube-api-access-nfvtc\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.770518 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.810225 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-utilities\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.810299 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-catalog-content\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.810333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfvtc\" (UniqueName: \"kubernetes.io/projected/c943e890-073d-4862-bea8-899afe75491c-kube-api-access-nfvtc\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.810868 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-utilities\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.810944 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-catalog-content\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.838669 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfvtc\" (UniqueName: \"kubernetes.io/projected/c943e890-073d-4862-bea8-899afe75491c-kube-api-access-nfvtc\") pod \"redhat-marketplace-msnss\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") " pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:54 crc kubenswrapper[4884]: I1128 16:44:54.984036 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.299765 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s42h4"] Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.519503 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-msnss"] Nov 28 16:44:55 crc kubenswrapper[4884]: W1128 16:44:55.536159 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc943e890_073d_4862_bea8_899afe75491c.slice/crio-535e7eeb272156bb82a8ae78c3c775485613651976ce30976b28c75665a9f1a6 WatchSource:0}: Error finding container 535e7eeb272156bb82a8ae78c3c775485613651976ce30976b28c75665a9f1a6: Status 404 returned error can't find the container with id 535e7eeb272156bb82a8ae78c3c775485613651976ce30976b28c75665a9f1a6 Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.752319 4884 generic.go:334] "Generic (PLEG): container finished" podID="c943e890-073d-4862-bea8-899afe75491c" containerID="2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2" exitCode=0 Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.752385 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msnss" event={"ID":"c943e890-073d-4862-bea8-899afe75491c","Type":"ContainerDied","Data":"2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2"} Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.752455 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msnss" event={"ID":"c943e890-073d-4862-bea8-899afe75491c","Type":"ContainerStarted","Data":"535e7eeb272156bb82a8ae78c3c775485613651976ce30976b28c75665a9f1a6"} Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.759816 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.761523 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerID="aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8" exitCode=0 Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.761578 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s42h4" event={"ID":"0e7a5dd6-e0cb-4f44-9d23-4183c223820c","Type":"ContainerDied","Data":"aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8"} Nov 28 16:44:55 crc kubenswrapper[4884]: I1128 16:44:55.761616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s42h4" event={"ID":"0e7a5dd6-e0cb-4f44-9d23-4183c223820c","Type":"ContainerStarted","Data":"b9235c314cba1dbe2d23413fccd8297fe42c0e8c04374a0f40bfa0cc3e65db9d"} Nov 28 16:44:57 crc kubenswrapper[4884]: I1128 16:44:57.779416 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerID="434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88" exitCode=0 Nov 28 16:44:57 crc kubenswrapper[4884]: I1128 16:44:57.779647 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s42h4" event={"ID":"0e7a5dd6-e0cb-4f44-9d23-4183c223820c","Type":"ContainerDied","Data":"434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88"} Nov 28 16:44:57 crc kubenswrapper[4884]: I1128 
16:44:57.782481 4884 generic.go:334] "Generic (PLEG): container finished" podID="c943e890-073d-4862-bea8-899afe75491c" containerID="cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54" exitCode=0 Nov 28 16:44:57 crc kubenswrapper[4884]: I1128 16:44:57.782514 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msnss" event={"ID":"c943e890-073d-4862-bea8-899afe75491c","Type":"ContainerDied","Data":"cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54"} Nov 28 16:44:58 crc kubenswrapper[4884]: I1128 16:44:58.689734 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:44:58 crc kubenswrapper[4884]: E1128 16:44:58.690063 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:44:58 crc kubenswrapper[4884]: I1128 16:44:58.795467 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msnss" event={"ID":"c943e890-073d-4862-bea8-899afe75491c","Type":"ContainerStarted","Data":"0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9"} Nov 28 16:44:58 crc kubenswrapper[4884]: I1128 16:44:58.798941 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s42h4" event={"ID":"0e7a5dd6-e0cb-4f44-9d23-4183c223820c","Type":"ContainerStarted","Data":"c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940"} Nov 28 16:44:58 crc kubenswrapper[4884]: I1128 16:44:58.830506 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-msnss" podStartSLOduration=2.251381942 podStartE2EDuration="4.830487295s" podCreationTimestamp="2025-11-28 16:44:54 +0000 UTC" firstStartedPulling="2025-11-28 16:44:55.759475845 +0000 UTC m=+5135.322259666" lastFinishedPulling="2025-11-28 16:44:58.338581218 +0000 UTC m=+5137.901365019" observedRunningTime="2025-11-28 16:44:58.821488236 +0000 UTC m=+5138.384272037" watchObservedRunningTime="2025-11-28 16:44:58.830487295 +0000 UTC m=+5138.393271096" Nov 28 16:44:58 crc kubenswrapper[4884]: I1128 16:44:58.840492 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s42h4" podStartSLOduration=2.373303615 podStartE2EDuration="4.8404763s" podCreationTimestamp="2025-11-28 16:44:54 +0000 UTC" firstStartedPulling="2025-11-28 16:44:55.765223965 +0000 UTC m=+5135.328007776" lastFinishedPulling="2025-11-28 16:44:58.23239666 +0000 UTC m=+5137.795180461" observedRunningTime="2025-11-28 16:44:58.837272312 +0000 UTC m=+5138.400056103" watchObservedRunningTime="2025-11-28 16:44:58.8404763 +0000 UTC m=+5138.403260091" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.144677 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64"] Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.146365 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.148079 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.149839 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.154689 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64"] Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.185413 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jprq\" (UniqueName: \"kubernetes.io/projected/6742bb97-7915-4d02-859d-472f0576c8d3-kube-api-access-5jprq\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.185475 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6742bb97-7915-4d02-859d-472f0576c8d3-config-volume\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.185546 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6742bb97-7915-4d02-859d-472f0576c8d3-secret-volume\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.287474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jprq\" (UniqueName: \"kubernetes.io/projected/6742bb97-7915-4d02-859d-472f0576c8d3-kube-api-access-5jprq\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.287544 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6742bb97-7915-4d02-859d-472f0576c8d3-config-volume\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.287594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6742bb97-7915-4d02-859d-472f0576c8d3-secret-volume\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.288730 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6742bb97-7915-4d02-859d-472f0576c8d3-config-volume\") pod 
\"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.293200 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6742bb97-7915-4d02-859d-472f0576c8d3-secret-volume\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.309659 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jprq\" (UniqueName: \"kubernetes.io/projected/6742bb97-7915-4d02-859d-472f0576c8d3-kube-api-access-5jprq\") pod \"collect-profiles-29405805-xmw64\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.471210 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.725160 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64"] Nov 28 16:45:00 crc kubenswrapper[4884]: I1128 16:45:00.816951 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" event={"ID":"6742bb97-7915-4d02-859d-472f0576c8d3","Type":"ContainerStarted","Data":"6710c32996eb6c5675b94c1e80dbf3a0060ab27fb240e85764d0b1e55c6d2add"} Nov 28 16:45:01 crc kubenswrapper[4884]: I1128 16:45:01.828254 4884 generic.go:334] "Generic (PLEG): container finished" podID="6742bb97-7915-4d02-859d-472f0576c8d3" containerID="602ed84b7660f0e227c280cbf10d5abbf30ef8a4a49d2d4388fc84bf767775df" exitCode=0 Nov 28 16:45:01 crc kubenswrapper[4884]: I1128 16:45:01.828311 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" event={"ID":"6742bb97-7915-4d02-859d-472f0576c8d3","Type":"ContainerDied","Data":"602ed84b7660f0e227c280cbf10d5abbf30ef8a4a49d2d4388fc84bf767775df"} Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.220571 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.234654 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6742bb97-7915-4d02-859d-472f0576c8d3-config-volume\") pod \"6742bb97-7915-4d02-859d-472f0576c8d3\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.234707 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jprq\" (UniqueName: \"kubernetes.io/projected/6742bb97-7915-4d02-859d-472f0576c8d3-kube-api-access-5jprq\") pod \"6742bb97-7915-4d02-859d-472f0576c8d3\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.234752 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6742bb97-7915-4d02-859d-472f0576c8d3-secret-volume\") pod \"6742bb97-7915-4d02-859d-472f0576c8d3\" (UID: \"6742bb97-7915-4d02-859d-472f0576c8d3\") " Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.237434 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6742bb97-7915-4d02-859d-472f0576c8d3-config-volume" (OuterVolumeSpecName: "config-volume") pod "6742bb97-7915-4d02-859d-472f0576c8d3" (UID: "6742bb97-7915-4d02-859d-472f0576c8d3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.242575 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6742bb97-7915-4d02-859d-472f0576c8d3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6742bb97-7915-4d02-859d-472f0576c8d3" (UID: "6742bb97-7915-4d02-859d-472f0576c8d3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.243953 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6742bb97-7915-4d02-859d-472f0576c8d3-kube-api-access-5jprq" (OuterVolumeSpecName: "kube-api-access-5jprq") pod "6742bb97-7915-4d02-859d-472f0576c8d3" (UID: "6742bb97-7915-4d02-859d-472f0576c8d3"). InnerVolumeSpecName "kube-api-access-5jprq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.336904 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6742bb97-7915-4d02-859d-472f0576c8d3-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.336946 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jprq\" (UniqueName: \"kubernetes.io/projected/6742bb97-7915-4d02-859d-472f0576c8d3-kube-api-access-5jprq\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.336959 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6742bb97-7915-4d02-859d-472f0576c8d3-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.842834 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" event={"ID":"6742bb97-7915-4d02-859d-472f0576c8d3","Type":"ContainerDied","Data":"6710c32996eb6c5675b94c1e80dbf3a0060ab27fb240e85764d0b1e55c6d2add"} Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.842876 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6710c32996eb6c5675b94c1e80dbf3a0060ab27fb240e85764d0b1e55c6d2add" Nov 28 16:45:03 crc kubenswrapper[4884]: I1128 16:45:03.842894 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.304404 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq"] Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.309755 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-254vq"] Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.712484 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45e5bbc2-2cae-43c5-ae25-714a75aac5ff" path="/var/lib/kubelet/pods/45e5bbc2-2cae-43c5-ae25-714a75aac5ff/volumes" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.771255 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.771808 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.817201 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.892502 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.984477 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:45:04 crc kubenswrapper[4884]: I1128 16:45:04.984795 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:45:05 crc kubenswrapper[4884]: I1128 16:45:05.024267 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:45:05 crc kubenswrapper[4884]: I1128 16:45:05.055903 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s42h4"] Nov 28 16:45:05 crc kubenswrapper[4884]: I1128 16:45:05.898497 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-msnss" Nov 28 16:45:06 crc kubenswrapper[4884]: I1128 16:45:06.865057 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s42h4" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="registry-server" containerID="cri-o://c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940" gracePeriod=2 Nov 28 16:45:07 crc kubenswrapper[4884]: I1128 16:45:07.254485 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-msnss"] Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.388545 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.419309 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-catalog-content\") pod \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.419417 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2rvs\" (UniqueName: \"kubernetes.io/projected/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-kube-api-access-c2rvs\") pod \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.419551 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-utilities\") pod \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\" (UID: \"0e7a5dd6-e0cb-4f44-9d23-4183c223820c\") " Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.420850 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-utilities" (OuterVolumeSpecName: "utilities") pod "0e7a5dd6-e0cb-4f44-9d23-4183c223820c" (UID: "0e7a5dd6-e0cb-4f44-9d23-4183c223820c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.427713 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-kube-api-access-c2rvs" (OuterVolumeSpecName: "kube-api-access-c2rvs") pod "0e7a5dd6-e0cb-4f44-9d23-4183c223820c" (UID: "0e7a5dd6-e0cb-4f44-9d23-4183c223820c"). InnerVolumeSpecName "kube-api-access-c2rvs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.466926 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e7a5dd6-e0cb-4f44-9d23-4183c223820c" (UID: "0e7a5dd6-e0cb-4f44-9d23-4183c223820c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.521112 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.521143 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2rvs\" (UniqueName: \"kubernetes.io/projected/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-kube-api-access-c2rvs\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.521153 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e7a5dd6-e0cb-4f44-9d23-4183c223820c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.884254 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerID="c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940" exitCode=0 Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.884322 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s42h4" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.884339 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s42h4" event={"ID":"0e7a5dd6-e0cb-4f44-9d23-4183c223820c","Type":"ContainerDied","Data":"c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940"} Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.884386 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s42h4" event={"ID":"0e7a5dd6-e0cb-4f44-9d23-4183c223820c","Type":"ContainerDied","Data":"b9235c314cba1dbe2d23413fccd8297fe42c0e8c04374a0f40bfa0cc3e65db9d"} Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.884403 4884 scope.go:117] "RemoveContainer" containerID="c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.884709 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-msnss" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="registry-server" containerID="cri-o://0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9" gracePeriod=2 Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.929894 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s42h4"] Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.936184 4884 scope.go:117] "RemoveContainer" containerID="434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88" Nov 28 16:45:08 crc kubenswrapper[4884]: I1128 16:45:08.945110 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s42h4"] Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.009659 4884 scope.go:117] "RemoveContainer" containerID="aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.057081 4884 scope.go:117] "RemoveContainer" containerID="c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940" Nov 28 16:45:09 crc kubenswrapper[4884]: E1128 16:45:09.057782 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound 
desc = could not find container \"c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940\": container with ID starting with c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940 not found: ID does not exist" containerID="c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.057848 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940"} err="failed to get container status \"c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940\": rpc error: code = NotFound desc = could not find container \"c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940\": container with ID starting with c74dfb52686fa0ad284f741259c88ab1a7875d18db0a27b1101475185bf6e940 not found: ID does not exist" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.057892 4884 scope.go:117] "RemoveContainer" containerID="434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88" Nov 28 16:45:09 crc kubenswrapper[4884]: E1128 16:45:09.058340 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88\": container with ID starting with 434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88 not found: ID does not exist" containerID="434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.058375 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88"} err="failed to get container status \"434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88\": rpc error: code = NotFound desc = could not find container \"434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88\": container with ID starting with 434598a5daffb2fa7cbbc34248eabaf2080690e0993d6cda2158c91984926c88 not found: ID does not exist" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.058399 4884 scope.go:117] "RemoveContainer" containerID="aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8" Nov 28 16:45:09 crc kubenswrapper[4884]: E1128 16:45:09.058936 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8\": container with ID starting with aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8 not found: ID does not exist" containerID="aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.058972 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8"} err="failed to get container status \"aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8\": rpc error: code = NotFound desc = could not find container \"aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8\": container with ID starting with aef5530e0b2ea7198df0f3c1e2484f8df01e05db5f921368d8df29490716a3a8 not found: ID does not exist" Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.252843 4884 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.338133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfvtc\" (UniqueName: \"kubernetes.io/projected/c943e890-073d-4862-bea8-899afe75491c-kube-api-access-nfvtc\") pod \"c943e890-073d-4862-bea8-899afe75491c\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") "
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.338491 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-utilities\") pod \"c943e890-073d-4862-bea8-899afe75491c\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") "
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.338550 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-catalog-content\") pod \"c943e890-073d-4862-bea8-899afe75491c\" (UID: \"c943e890-073d-4862-bea8-899afe75491c\") "
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.339228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-utilities" (OuterVolumeSpecName: "utilities") pod "c943e890-073d-4862-bea8-899afe75491c" (UID: "c943e890-073d-4862-bea8-899afe75491c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.339614 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.341448 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c943e890-073d-4862-bea8-899afe75491c-kube-api-access-nfvtc" (OuterVolumeSpecName: "kube-api-access-nfvtc") pod "c943e890-073d-4862-bea8-899afe75491c" (UID: "c943e890-073d-4862-bea8-899afe75491c"). InnerVolumeSpecName "kube-api-access-nfvtc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.360697 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c943e890-073d-4862-bea8-899afe75491c" (UID: "c943e890-073d-4862-bea8-899afe75491c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.441624 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c943e890-073d-4862-bea8-899afe75491c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.441651 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfvtc\" (UniqueName: \"kubernetes.io/projected/c943e890-073d-4862-bea8-899afe75491c-kube-api-access-nfvtc\") on node \"crc\" DevicePath \"\""
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.688380 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"
Nov 28 16:45:09 crc kubenswrapper[4884]: E1128 16:45:09.688722 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.895948 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msnss" event={"ID":"c943e890-073d-4862-bea8-899afe75491c","Type":"ContainerDied","Data":"0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9"}
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.896262 4884 scope.go:117] "RemoveContainer" containerID="0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9"
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.896021 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-msnss"
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.895893 4884 generic.go:334] "Generic (PLEG): container finished" podID="c943e890-073d-4862-bea8-899afe75491c" containerID="0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9" exitCode=0
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.897390 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-msnss" event={"ID":"c943e890-073d-4862-bea8-899afe75491c","Type":"ContainerDied","Data":"535e7eeb272156bb82a8ae78c3c775485613651976ce30976b28c75665a9f1a6"}
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.918553 4884 scope.go:117] "RemoveContainer" containerID="cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54"
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.936781 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-msnss"]
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.955234 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-msnss"]
Nov 28 16:45:09 crc kubenswrapper[4884]: I1128 16:45:09.971536 4884 scope.go:117] "RemoveContainer" containerID="2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.002573 4884 scope.go:117] "RemoveContainer" containerID="0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9"
Nov 28 16:45:10 crc kubenswrapper[4884]: E1128 16:45:10.003137 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9\": container with ID starting with 0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9 not found: ID does not exist" containerID="0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.003178 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9"} err="failed to get container status \"0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9\": rpc error: code = NotFound desc = could not find container \"0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9\": container with ID starting with 0fe47dbc0e49071d7ca62a1431cb967da5b8ac7024c4a383ae7fa898ddd293a9 not found: ID does not exist"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.003206 4884 scope.go:117] "RemoveContainer" containerID="cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54"
Nov 28 16:45:10 crc kubenswrapper[4884]: E1128 16:45:10.003769 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54\": container with ID starting with cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54 not found: ID does not exist" containerID="cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.003814 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54"} err="failed to get container status \"cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54\": rpc error: code = NotFound desc = could not find container \"cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54\": container with ID starting with cd2309db8f8878bac9c6c8256b1634177c73f271030b8910bc3ebc30c6b3ae54 not found: ID does not exist"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.003847 4884 scope.go:117] "RemoveContainer" containerID="2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2"
Nov 28 16:45:10 crc kubenswrapper[4884]: E1128 16:45:10.004378 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2\": container with ID starting with 2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2 not found: ID does not exist" containerID="2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.004401 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2"} err="failed to get container status \"2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2\": rpc error: code = NotFound desc = could not find container \"2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2\": container with ID starting with 2d9b52e953e592ae567bcfc152b5cc29816720912970bf214e1fdb047efb74d2 not found: ID does not exist"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.699837 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" path="/var/lib/kubelet/pods/0e7a5dd6-e0cb-4f44-9d23-4183c223820c/volumes"
Nov 28 16:45:10 crc kubenswrapper[4884]: I1128 16:45:10.700704 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c943e890-073d-4862-bea8-899afe75491c" path="/var/lib/kubelet/pods/c943e890-073d-4862-bea8-899afe75491c/volumes"
Nov 28 16:45:22 crc kubenswrapper[4884]: I1128 16:45:22.688681 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"
Nov 28 16:45:22 crc kubenswrapper[4884]: E1128 16:45:22.689308 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:45:29 crc kubenswrapper[4884]: I1128 16:45:29.279715 4884 scope.go:117] "RemoveContainer" containerID="bdf130558afcdca20368ab6b3a1762b9f15162dea3f32e4a7777dd3717c91cac"
Nov 28 16:45:34 crc kubenswrapper[4884]: I1128 16:45:34.688916 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"
Nov 28 16:45:34 crc kubenswrapper[4884]: E1128 16:45:34.690285 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:45:48 crc kubenswrapper[4884]: I1128 16:45:48.689065 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:45:48 crc kubenswrapper[4884]: E1128 16:45:48.689767 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:46:02 crc kubenswrapper[4884]: I1128 16:46:02.689188 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:46:02 crc kubenswrapper[4884]: E1128 16:46:02.689937 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:46:15 crc kubenswrapper[4884]: I1128 16:46:15.688396 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:46:15 crc kubenswrapper[4884]: E1128 16:46:15.689115 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:46:30 crc kubenswrapper[4884]: I1128 16:46:30.693278 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:46:30 crc kubenswrapper[4884]: E1128 16:46:30.694005 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:46:45 crc kubenswrapper[4884]: I1128 16:46:45.688569 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:46:45 crc kubenswrapper[4884]: E1128 16:46:45.690035 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:46:58 crc kubenswrapper[4884]: I1128 16:46:58.688439 4884 scope.go:117] "RemoveContainer" 
containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:46:58 crc kubenswrapper[4884]: E1128 16:46:58.689526 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:47:10 crc kubenswrapper[4884]: I1128 16:47:10.693933 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:47:10 crc kubenswrapper[4884]: E1128 16:47:10.696295 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:47:23 crc kubenswrapper[4884]: I1128 16:47:23.688751 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:47:23 crc kubenswrapper[4884]: E1128 16:47:23.690482 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:47:34 crc kubenswrapper[4884]: I1128 16:47:34.688889 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:47:34 crc kubenswrapper[4884]: E1128 16:47:34.690130 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:47:47 crc kubenswrapper[4884]: I1128 16:47:47.688814 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:47:47 crc kubenswrapper[4884]: E1128 16:47:47.690437 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:47:58 crc kubenswrapper[4884]: I1128 16:47:58.688877 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:47:58 crc kubenswrapper[4884]: E1128 16:47:58.689976 4884 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.570729 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.571906 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="registry-server" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.571933 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="registry-server" Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.571962 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="extract-utilities" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.571973 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="extract-utilities" Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.571985 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="extract-content" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.571998 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="extract-content" Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.572014 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="extract-content" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.572025 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="extract-content" Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.572050 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6742bb97-7915-4d02-859d-472f0576c8d3" containerName="collect-profiles" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.572060 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6742bb97-7915-4d02-859d-472f0576c8d3" containerName="collect-profiles" Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.572168 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="extract-utilities" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.572183 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="extract-utilities" Nov 28 16:48:09 crc kubenswrapper[4884]: E1128 16:48:09.572205 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="registry-server" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.572216 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="registry-server" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.572438 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6742bb97-7915-4d02-859d-472f0576c8d3" containerName="collect-profiles" Nov 28 16:48:09 crc 
kubenswrapper[4884]: I1128 16:48:09.572476 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e7a5dd6-e0cb-4f44-9d23-4183c223820c" containerName="registry-server" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.572501 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c943e890-073d-4862-bea8-899afe75491c" containerName="registry-server" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.573266 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.576862 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-2rn6m" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.587491 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.735139 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " pod="openstack/mariadb-copy-data" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.735202 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-972x6\" (UniqueName: \"kubernetes.io/projected/2c879f6b-8b91-4d6a-b465-c82e9cec3f92-kube-api-access-972x6\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " pod="openstack/mariadb-copy-data" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.836986 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " pod="openstack/mariadb-copy-data" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.837077 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-972x6\" (UniqueName: \"kubernetes.io/projected/2c879f6b-8b91-4d6a-b465-c82e9cec3f92-kube-api-access-972x6\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " pod="openstack/mariadb-copy-data" Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.840417 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
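[editor's note] The csi_attacher entry just above explains why no staging step appears for pvc-b087e5a7-c3f2-4673-9358-4508aad2b165: the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME capability, so the kubelet skips MountDevice (NodeStageVolume) and publishes the volume directly. A sketch of how that capability is discovered over the CSI NodeGetCapabilities RPC; the socket path is a hypothetical stand-in for this driver's node endpoint:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed node-plugin socket; real drivers register their own path
	// under /var/lib/kubelet/plugins/.
	conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := csi.NewNodeClient(conn).NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		panic(err)
	}
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			fmt.Println("driver expects NodeStageVolume before publish")
			return
		}
	}
	// The branch taken in the log above: no STAGE_UNSTAGE_VOLUME, so
	// MountDevice is skipped and NodePublishVolume runs directly.
	fmt.Println("STAGE_UNSTAGE_VOLUME not set; MountDevice is skipped")
}
```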
Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.840462 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f6b22f40a55399d91fd9c038c20146152bf60083628d35222a676698a4c60954/globalmount\"" pod="openstack/mariadb-copy-data"
Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.860798 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-972x6\" (UniqueName: \"kubernetes.io/projected/2c879f6b-8b91-4d6a-b465-c82e9cec3f92-kube-api-access-972x6\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " pod="openstack/mariadb-copy-data"
Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.869426 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") pod \"mariadb-copy-data\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " pod="openstack/mariadb-copy-data"
Nov 28 16:48:09 crc kubenswrapper[4884]: I1128 16:48:09.898173 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.005737 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tdm6q"]
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.009528 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.017819 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdm6q"]
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.141262 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9nq9\" (UniqueName: \"kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.141312 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-utilities\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.141356 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-catalog-content\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.242961 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9nq9\" (UniqueName: \"kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.243007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-utilities\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.243049 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-catalog-content\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.243657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-catalog-content\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.243660 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-utilities\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.264044 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9nq9\" (UniqueName: \"kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q"
"MountVolume.SetUp succeeded for volume \"kube-api-access-f9nq9\" (UniqueName: \"kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9\") pod \"community-operators-tdm6q\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") " pod="openshift-marketplace/community-operators-tdm6q" Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.385534 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdm6q" Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.545278 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.647760 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tdm6q"] Nov 28 16:48:10 crc kubenswrapper[4884]: W1128 16:48:10.648599 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0659feed_2449_4ab0_8c90_526281d8f114.slice/crio-60792fc12ed57699a6caa5c8434eba8744c92e71368ba84d8aa5c709e6563869 WatchSource:0}: Error finding container 60792fc12ed57699a6caa5c8434eba8744c92e71368ba84d8aa5c709e6563869: Status 404 returned error can't find the container with id 60792fc12ed57699a6caa5c8434eba8744c92e71368ba84d8aa5c709e6563869 Nov 28 16:48:10 crc kubenswrapper[4884]: I1128 16:48:10.694580 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:48:10 crc kubenswrapper[4884]: E1128 16:48:10.694810 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:48:11 crc kubenswrapper[4884]: I1128 16:48:11.466734 4884 generic.go:334] "Generic (PLEG): container finished" podID="0659feed-2449-4ab0-8c90-526281d8f114" containerID="363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec" exitCode=0 Nov 28 16:48:11 crc kubenswrapper[4884]: I1128 16:48:11.466786 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdm6q" event={"ID":"0659feed-2449-4ab0-8c90-526281d8f114","Type":"ContainerDied","Data":"363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec"} Nov 28 16:48:11 crc kubenswrapper[4884]: I1128 16:48:11.467355 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdm6q" event={"ID":"0659feed-2449-4ab0-8c90-526281d8f114","Type":"ContainerStarted","Data":"60792fc12ed57699a6caa5c8434eba8744c92e71368ba84d8aa5c709e6563869"} Nov 28 16:48:11 crc kubenswrapper[4884]: I1128 16:48:11.472741 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"2c879f6b-8b91-4d6a-b465-c82e9cec3f92","Type":"ContainerStarted","Data":"a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68"} Nov 28 16:48:11 crc kubenswrapper[4884]: I1128 16:48:11.472807 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"2c879f6b-8b91-4d6a-b465-c82e9cec3f92","Type":"ContainerStarted","Data":"e17b1ae10bdb41ad9d1526cb207afcfe9a1f2b17b12fe32b8ceaf91a8f3feeb9"} Nov 28 
16:48:11 crc kubenswrapper[4884]: I1128 16:48:11.520725 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.520703041 podStartE2EDuration="3.520703041s" podCreationTimestamp="2025-11-28 16:48:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:48:11.51415443 +0000 UTC m=+5331.076938281" watchObservedRunningTime="2025-11-28 16:48:11.520703041 +0000 UTC m=+5331.083486842" Nov 28 16:48:13 crc kubenswrapper[4884]: I1128 16:48:13.497269 4884 generic.go:334] "Generic (PLEG): container finished" podID="0659feed-2449-4ab0-8c90-526281d8f114" containerID="047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f" exitCode=0 Nov 28 16:48:13 crc kubenswrapper[4884]: I1128 16:48:13.497323 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdm6q" event={"ID":"0659feed-2449-4ab0-8c90-526281d8f114","Type":"ContainerDied","Data":"047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f"} Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.217766 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.220105 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.229754 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.309658 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25g27\" (UniqueName: \"kubernetes.io/projected/791fe22a-6e43-4394-bcb9-051bcefccbb5-kube-api-access-25g27\") pod \"mariadb-client\" (UID: \"791fe22a-6e43-4394-bcb9-051bcefccbb5\") " pod="openstack/mariadb-client" Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.411785 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25g27\" (UniqueName: \"kubernetes.io/projected/791fe22a-6e43-4394-bcb9-051bcefccbb5-kube-api-access-25g27\") pod \"mariadb-client\" (UID: \"791fe22a-6e43-4394-bcb9-051bcefccbb5\") " pod="openstack/mariadb-client" Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.439884 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25g27\" (UniqueName: \"kubernetes.io/projected/791fe22a-6e43-4394-bcb9-051bcefccbb5-kube-api-access-25g27\") pod \"mariadb-client\" (UID: \"791fe22a-6e43-4394-bcb9-051bcefccbb5\") " pod="openstack/mariadb-client" Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.513247 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdm6q" event={"ID":"0659feed-2449-4ab0-8c90-526281d8f114","Type":"ContainerStarted","Data":"b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29"} Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.537139 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tdm6q" podStartSLOduration=2.884470402 podStartE2EDuration="5.537118366s" podCreationTimestamp="2025-11-28 16:48:09 +0000 UTC" firstStartedPulling="2025-11-28 16:48:11.469422456 +0000 UTC m=+5331.032206257" lastFinishedPulling="2025-11-28 16:48:14.12207038 +0000 UTC m=+5333.684854221" 
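[editor's note] The startup-duration fields in the tracker entry above reconcile as follows (interpretation inferred from the logged values, not from documentation): podStartE2EDuration = watchObservedRunningTime - podCreationTimestamp = 16:48:14.537118366 - 16:48:09 = 5.537118366 s, and podStartSLOduration subtracts the image-pull window measured on the monotonic clock (m=+5333.684854221 - m=+5331.032206257 = 2.652647964 s), giving 5.537118366 - 2.652647964 = 2.884470402 s, which matches the logged value. For mariadb-copy-data a few entries earlier, the pull timestamps are zero-valued (no pull occurred), so SLO and E2E coincide at 3.520703041 s.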
Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.540683 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:14 crc kubenswrapper[4884]: I1128 16:48:14.781871 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:15 crc kubenswrapper[4884]: I1128 16:48:15.525019 4884 generic.go:334] "Generic (PLEG): container finished" podID="791fe22a-6e43-4394-bcb9-051bcefccbb5" containerID="fc25f88e7c9fda9926c782282192c5cd1028c84c5f1bd33e470b1a1c7870b5c8" exitCode=0
Nov 28 16:48:15 crc kubenswrapper[4884]: I1128 16:48:15.526376 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"791fe22a-6e43-4394-bcb9-051bcefccbb5","Type":"ContainerDied","Data":"fc25f88e7c9fda9926c782282192c5cd1028c84c5f1bd33e470b1a1c7870b5c8"}
Nov 28 16:48:15 crc kubenswrapper[4884]: I1128 16:48:15.526555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"791fe22a-6e43-4394-bcb9-051bcefccbb5","Type":"ContainerStarted","Data":"e03eadb06a441c23f6eb8e30ee937bf7255ed5f73ae2ab36143900726ee6229d"}
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.834009 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.853922 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_791fe22a-6e43-4394-bcb9-051bcefccbb5/mariadb-client/0.log"
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.874898 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.881744 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.949994 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25g27\" (UniqueName: \"kubernetes.io/projected/791fe22a-6e43-4394-bcb9-051bcefccbb5-kube-api-access-25g27\") pod \"791fe22a-6e43-4394-bcb9-051bcefccbb5\" (UID: \"791fe22a-6e43-4394-bcb9-051bcefccbb5\") "
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.954975 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/791fe22a-6e43-4394-bcb9-051bcefccbb5-kube-api-access-25g27" (OuterVolumeSpecName: "kube-api-access-25g27") pod "791fe22a-6e43-4394-bcb9-051bcefccbb5" (UID: "791fe22a-6e43-4394-bcb9-051bcefccbb5"). InnerVolumeSpecName "kube-api-access-25g27". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.994841 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:16 crc kubenswrapper[4884]: E1128 16:48:16.995208 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="791fe22a-6e43-4394-bcb9-051bcefccbb5" containerName="mariadb-client"
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.995230 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="791fe22a-6e43-4394-bcb9-051bcefccbb5" containerName="mariadb-client"
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.995448 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="791fe22a-6e43-4394-bcb9-051bcefccbb5" containerName="mariadb-client"
Nov 28 16:48:16 crc kubenswrapper[4884]: I1128 16:48:16.996139 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.001921 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.051887 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25g27\" (UniqueName: \"kubernetes.io/projected/791fe22a-6e43-4394-bcb9-051bcefccbb5-kube-api-access-25g27\") on node \"crc\" DevicePath \"\""
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.153122 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpk28\" (UniqueName: \"kubernetes.io/projected/cf3c89c8-123d-421c-9317-a315ac8b7891-kube-api-access-fpk28\") pod \"mariadb-client\" (UID: \"cf3c89c8-123d-421c-9317-a315ac8b7891\") " pod="openstack/mariadb-client"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.255264 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpk28\" (UniqueName: \"kubernetes.io/projected/cf3c89c8-123d-421c-9317-a315ac8b7891-kube-api-access-fpk28\") pod \"mariadb-client\" (UID: \"cf3c89c8-123d-421c-9317-a315ac8b7891\") " pod="openstack/mariadb-client"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.273848 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpk28\" (UniqueName: \"kubernetes.io/projected/cf3c89c8-123d-421c-9317-a315ac8b7891-kube-api-access-fpk28\") pod \"mariadb-client\" (UID: \"cf3c89c8-123d-421c-9317-a315ac8b7891\") " pod="openstack/mariadb-client"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.310997 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.544417 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e03eadb06a441c23f6eb8e30ee937bf7255ed5f73ae2ab36143900726ee6229d"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.544550 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.562464 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="791fe22a-6e43-4394-bcb9-051bcefccbb5" podUID="cf3c89c8-123d-421c-9317-a315ac8b7891"
Nov 28 16:48:17 crc kubenswrapper[4884]: I1128 16:48:17.797725 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:17 crc kubenswrapper[4884]: W1128 16:48:17.802971 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf3c89c8_123d_421c_9317_a315ac8b7891.slice/crio-75e33e2f1bc65cbc9740a501a6b5fd4c1c0fb8017eaa572cfc4b2d236c50ea52 WatchSource:0}: Error finding container 75e33e2f1bc65cbc9740a501a6b5fd4c1c0fb8017eaa572cfc4b2d236c50ea52: Status 404 returned error can't find the container with id 75e33e2f1bc65cbc9740a501a6b5fd4c1c0fb8017eaa572cfc4b2d236c50ea52
Nov 28 16:48:18 crc kubenswrapper[4884]: I1128 16:48:18.555448 4884 generic.go:334] "Generic (PLEG): container finished" podID="cf3c89c8-123d-421c-9317-a315ac8b7891" containerID="a246be2c484945847e845a62d246895251e6c947dff1e9d067a951464e2d6d18" exitCode=0
Nov 28 16:48:18 crc kubenswrapper[4884]: I1128 16:48:18.555525 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"cf3c89c8-123d-421c-9317-a315ac8b7891","Type":"ContainerDied","Data":"a246be2c484945847e845a62d246895251e6c947dff1e9d067a951464e2d6d18"}
Nov 28 16:48:18 crc kubenswrapper[4884]: I1128 16:48:18.555810 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"cf3c89c8-123d-421c-9317-a315ac8b7891","Type":"ContainerStarted","Data":"75e33e2f1bc65cbc9740a501a6b5fd4c1c0fb8017eaa572cfc4b2d236c50ea52"}
Nov 28 16:48:18 crc kubenswrapper[4884]: I1128 16:48:18.704644 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="791fe22a-6e43-4394-bcb9-051bcefccbb5" path="/var/lib/kubelet/pods/791fe22a-6e43-4394-bcb9-051bcefccbb5/volumes"
Nov 28 16:48:19 crc kubenswrapper[4884]: I1128 16:48:19.965647 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:19 crc kubenswrapper[4884]: I1128 16:48:19.985153 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_cf3c89c8-123d-421c-9317-a315ac8b7891/mariadb-client/0.log"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.012991 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.017988 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"]
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.098762 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpk28\" (UniqueName: \"kubernetes.io/projected/cf3c89c8-123d-421c-9317-a315ac8b7891-kube-api-access-fpk28\") pod \"cf3c89c8-123d-421c-9317-a315ac8b7891\" (UID: \"cf3c89c8-123d-421c-9317-a315ac8b7891\") "
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.108434 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf3c89c8-123d-421c-9317-a315ac8b7891-kube-api-access-fpk28" (OuterVolumeSpecName: "kube-api-access-fpk28") pod "cf3c89c8-123d-421c-9317-a315ac8b7891" (UID: "cf3c89c8-123d-421c-9317-a315ac8b7891"). InnerVolumeSpecName "kube-api-access-fpk28". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.201158 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpk28\" (UniqueName: \"kubernetes.io/projected/cf3c89c8-123d-421c-9317-a315ac8b7891-kube-api-access-fpk28\") on node \"crc\" DevicePath \"\""
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.386083 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.386396 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.451246 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.579208 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.579246 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75e33e2f1bc65cbc9740a501a6b5fd4c1c0fb8017eaa572cfc4b2d236c50ea52"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.634650 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.703153 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf3c89c8-123d-421c-9317-a315ac8b7891" path="/var/lib/kubelet/pods/cf3c89c8-123d-421c-9317-a315ac8b7891/volumes"
Nov 28 16:48:20 crc kubenswrapper[4884]: I1128 16:48:20.703829 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdm6q"]
Nov 28 16:48:21 crc kubenswrapper[4884]: I1128 16:48:21.688994 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41"
Nov 28 16:48:21 crc kubenswrapper[4884]: E1128 16:48:21.689623 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:48:22 crc kubenswrapper[4884]: I1128 16:48:22.597171 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tdm6q" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="registry-server" containerID="cri-o://b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29" gracePeriod=2
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.550204 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.612121 4884 generic.go:334] "Generic (PLEG): container finished" podID="0659feed-2449-4ab0-8c90-526281d8f114" containerID="b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29" exitCode=0
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.612184 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdm6q" event={"ID":"0659feed-2449-4ab0-8c90-526281d8f114","Type":"ContainerDied","Data":"b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29"}
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.612255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tdm6q" event={"ID":"0659feed-2449-4ab0-8c90-526281d8f114","Type":"ContainerDied","Data":"60792fc12ed57699a6caa5c8434eba8744c92e71368ba84d8aa5c709e6563869"}
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.612248 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tdm6q"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.612280 4884 scope.go:117] "RemoveContainer" containerID="b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.637054 4884 scope.go:117] "RemoveContainer" containerID="047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.655135 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9nq9\" (UniqueName: \"kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9\") pod \"0659feed-2449-4ab0-8c90-526281d8f114\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") "
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.655242 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-catalog-content\") pod \"0659feed-2449-4ab0-8c90-526281d8f114\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") "
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.655344 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-utilities\") pod \"0659feed-2449-4ab0-8c90-526281d8f114\" (UID: \"0659feed-2449-4ab0-8c90-526281d8f114\") "
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.657522 4884 scope.go:117] "RemoveContainer" containerID="363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.660735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-utilities" (OuterVolumeSpecName: "utilities") pod "0659feed-2449-4ab0-8c90-526281d8f114" (UID: "0659feed-2449-4ab0-8c90-526281d8f114"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.662263 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9" (OuterVolumeSpecName: "kube-api-access-f9nq9") pod "0659feed-2449-4ab0-8c90-526281d8f114" (UID: "0659feed-2449-4ab0-8c90-526281d8f114"). InnerVolumeSpecName "kube-api-access-f9nq9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.710653 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0659feed-2449-4ab0-8c90-526281d8f114" (UID: "0659feed-2449-4ab0-8c90-526281d8f114"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.725165 4884 scope.go:117] "RemoveContainer" containerID="b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29"
Nov 28 16:48:23 crc kubenswrapper[4884]: E1128 16:48:23.725669 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29\": container with ID starting with b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29 not found: ID does not exist" containerID="b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.725719 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29"} err="failed to get container status \"b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29\": rpc error: code = NotFound desc = could not find container \"b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29\": container with ID starting with b224281be836a6f81f173dc5e21ee766f3a905b1bd8a38d62a92191b74bdca29 not found: ID does not exist"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.725752 4884 scope.go:117] "RemoveContainer" containerID="047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f"
Nov 28 16:48:23 crc kubenswrapper[4884]: E1128 16:48:23.726107 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f\": container with ID starting with 047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f not found: ID does not exist" containerID="047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.726152 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f"} err="failed to get container status \"047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f\": rpc error: code = NotFound desc = could not find container \"047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f\": container with ID starting with 047db4f5999681caa7b1fe19efae40c43c8190a8c0be2f3cf389296d0420909f not found: ID does not exist"
Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.726183 4884 scope.go:117] "RemoveContainer" containerID="363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec"
containerID="363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec" Nov 28 16:48:23 crc kubenswrapper[4884]: E1128 16:48:23.726492 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec\": container with ID starting with 363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec not found: ID does not exist" containerID="363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec" Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.726516 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec"} err="failed to get container status \"363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec\": rpc error: code = NotFound desc = could not find container \"363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec\": container with ID starting with 363eab6c7355d2bdaef4ebfe958a9b79c79411025698137efab0a9206d5943ec not found: ID does not exist" Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.757852 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.758244 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9nq9\" (UniqueName: \"kubernetes.io/projected/0659feed-2449-4ab0-8c90-526281d8f114-kube-api-access-f9nq9\") on node \"crc\" DevicePath \"\"" Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.758310 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0659feed-2449-4ab0-8c90-526281d8f114-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.947361 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tdm6q"] Nov 28 16:48:23 crc kubenswrapper[4884]: I1128 16:48:23.955355 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tdm6q"] Nov 28 16:48:24 crc kubenswrapper[4884]: I1128 16:48:24.699991 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0659feed-2449-4ab0-8c90-526281d8f114" path="/var/lib/kubelet/pods/0659feed-2449-4ab0-8c90-526281d8f114/volumes" Nov 28 16:48:32 crc kubenswrapper[4884]: I1128 16:48:32.687879 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:48:32 crc kubenswrapper[4884]: E1128 16:48:32.688732 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:48:43 crc kubenswrapper[4884]: I1128 16:48:43.688759 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:48:43 crc kubenswrapper[4884]: E1128 16:48:43.689545 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.020757 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:48:55 crc kubenswrapper[4884]: E1128 16:48:55.021875 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf3c89c8-123d-421c-9317-a315ac8b7891" containerName="mariadb-client" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.021897 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf3c89c8-123d-421c-9317-a315ac8b7891" containerName="mariadb-client" Nov 28 16:48:55 crc kubenswrapper[4884]: E1128 16:48:55.021921 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="extract-utilities" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.021931 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="extract-utilities" Nov 28 16:48:55 crc kubenswrapper[4884]: E1128 16:48:55.021949 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="extract-content" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.021959 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="extract-content" Nov 28 16:48:55 crc kubenswrapper[4884]: E1128 16:48:55.021984 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="registry-server" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.021991 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="registry-server" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.022228 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0659feed-2449-4ab0-8c90-526281d8f114" containerName="registry-server" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.022250 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf3c89c8-123d-421c-9317-a315ac8b7891" containerName="mariadb-client" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.023301 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.025276 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-5ffls" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.025693 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.025773 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.036587 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.038233 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.053313 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.064502 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.066709 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.074175 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.085422 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091343 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a5817509-87ff-4139-a4af-c95881825b8a-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091414 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93b03444-4c92-4783-90ba-91fd986a3c55-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091477 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93b03444-4c92-4783-90ba-91fd986a3c55-config\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091537 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091571 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5817509-87ff-4139-a4af-c95881825b8a-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091601 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5817509-87ff-4139-a4af-c95881825b8a-config\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " 
pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091623 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b03444-4c92-4783-90ba-91fd986a3c55-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091672 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5817509-87ff-4139-a4af-c95881825b8a-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091701 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93b03444-4c92-4783-90ba-91fd986a3c55-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.091719 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8svhw\" (UniqueName: \"kubernetes.io/projected/a5817509-87ff-4139-a4af-c95881825b8a-kube-api-access-8svhw\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.092179 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjthb\" (UniqueName: \"kubernetes.io/projected/93b03444-4c92-4783-90ba-91fd986a3c55-kube-api-access-hjthb\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194006 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93b03444-4c92-4783-90ba-91fd986a3c55-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194067 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194085 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93b03444-4c92-4783-90ba-91fd986a3c55-config\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194118 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/af2877a4-7652-44cc-a491-21592c862759-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194135 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af2877a4-7652-44cc-a491-21592c862759-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194183 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5817509-87ff-4139-a4af-c95881825b8a-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194205 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194223 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5817509-87ff-4139-a4af-c95881825b8a-config\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b03444-4c92-4783-90ba-91fd986a3c55-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5817509-87ff-4139-a4af-c95881825b8a-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194291 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2877a4-7652-44cc-a491-21592c862759-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93b03444-4c92-4783-90ba-91fd986a3c55-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194329 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8svhw\" (UniqueName: 
\"kubernetes.io/projected/a5817509-87ff-4139-a4af-c95881825b8a-kube-api-access-8svhw\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194349 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjthb\" (UniqueName: \"kubernetes.io/projected/93b03444-4c92-4783-90ba-91fd986a3c55-kube-api-access-hjthb\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194365 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af2877a4-7652-44cc-a491-21592c862759-config\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194381 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn98g\" (UniqueName: \"kubernetes.io/projected/af2877a4-7652-44cc-a491-21592c862759-kube-api-access-bn98g\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194401 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a5817509-87ff-4139-a4af-c95881825b8a-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.194625 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/93b03444-4c92-4783-90ba-91fd986a3c55-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.195496 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5817509-87ff-4139-a4af-c95881825b8a-config\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.201513 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5817509-87ff-4139-a4af-c95881825b8a-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.204775 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93b03444-4c92-4783-90ba-91fd986a3c55-config\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.205129 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93b03444-4c92-4783-90ba-91fd986a3c55-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.205703 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5817509-87ff-4139-a4af-c95881825b8a-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.206396 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a5817509-87ff-4139-a4af-c95881825b8a-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.206601 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b03444-4c92-4783-90ba-91fd986a3c55-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.217731 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.217821 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/23303f01a4eb4c8995865cccbe33277dab41e5fa1f19eb5e0d947a1ba71e90ef/globalmount\"" pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.227861 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.227919 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c746273fcb5e15c9a2385d522be9a5e95817751c3dcd6ae0cbba28b8e1bb8486/globalmount\"" pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.232981 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjthb\" (UniqueName: \"kubernetes.io/projected/93b03444-4c92-4783-90ba-91fd986a3c55-kube-api-access-hjthb\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.237652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8svhw\" (UniqueName: \"kubernetes.io/projected/a5817509-87ff-4139-a4af-c95881825b8a-kube-api-access-8svhw\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.245241 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.248754 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.253961 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.254372 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.254918 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-sgmpt" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.258337 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.260051 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.269929 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.273013 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.275127 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0c0a033a-6d26-47d8-9ace-f9a2feded65e\") pod \"ovsdbserver-nb-2\" (UID: \"93b03444-4c92-4783-90ba-91fd986a3c55\") " pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.278446 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.287537 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.295133 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296002 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn98g\" (UniqueName: \"kubernetes.io/projected/af2877a4-7652-44cc-a491-21592c862759-kube-api-access-bn98g\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296052 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296155 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4nh6\" (UniqueName: \"kubernetes.io/projected/c638bade-936d-4342-8146-527c2cb80373-kube-api-access-w4nh6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296183 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c638bade-936d-4342-8146-527c2cb80373-config\") pod \"ovsdbserver-sb-2\" (UID: 
\"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296318 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-config\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296521 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296606 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/af2877a4-7652-44cc-a491-21592c862759-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.296682 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af2877a4-7652-44cc-a491-21592c862759-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297112 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297227 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c638bade-936d-4342-8146-527c2cb80373-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297277 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " 
pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297332 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c638bade-936d-4342-8146-527c2cb80373-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297372 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c638bade-936d-4342-8146-527c2cb80373-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297447 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2877a4-7652-44cc-a491-21592c862759-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297533 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af2877a4-7652-44cc-a491-21592c862759-config\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.297606 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdtbv\" (UniqueName: \"kubernetes.io/projected/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-kube-api-access-gdtbv\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.298723 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/af2877a4-7652-44cc-a491-21592c862759-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.300361 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af2877a4-7652-44cc-a491-21592c862759-config\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.304961 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af2877a4-7652-44cc-a491-21592c862759-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.306026 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.306081 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8cb47337ed03ce742d724308cd58a02793613d8452796668b3998d6eb70a5a3b/globalmount\"" pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.306551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af2877a4-7652-44cc-a491-21592c862759-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.308648 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-adb4b9fa-8d7d-41fe-a9f8-ec3f0dbad5d8\") pod \"ovsdbserver-nb-0\" (UID: \"a5817509-87ff-4139-a4af-c95881825b8a\") " pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.314675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn98g\" (UniqueName: \"kubernetes.io/projected/af2877a4-7652-44cc-a491-21592c862759-kube-api-access-bn98g\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.333876 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0113ff32-3b28-4fe6-a841-a81b033b5256\") pod \"ovsdbserver-nb-1\" (UID: \"af2877a4-7652-44cc-a491-21592c862759\") " pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.341145 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.362986 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.385850 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.399582 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdtbv\" (UniqueName: \"kubernetes.io/projected/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-kube-api-access-gdtbv\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.399671 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.399760 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4nh6\" (UniqueName: \"kubernetes.io/projected/c638bade-936d-4342-8146-527c2cb80373-kube-api-access-w4nh6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.399867 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c638bade-936d-4342-8146-527c2cb80373-config\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.399936 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.399973 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-config\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.400023 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.400065 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.400190 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c638bade-936d-4342-8146-527c2cb80373-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.400218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.400241 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c638bade-936d-4342-8146-527c2cb80373-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.400301 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c638bade-936d-4342-8146-527c2cb80373-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.401314 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c638bade-936d-4342-8146-527c2cb80373-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.402881 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.403911 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c638bade-936d-4342-8146-527c2cb80373-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.404716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c638bade-936d-4342-8146-527c2cb80373-config\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.406009 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-config\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.409525 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.409565 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ce4c15ea97ed7e5781e326f38d4732cc8aa2bca5d06ea2f99bf180195b6a2d46/globalmount\"" pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.409772 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.409797 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2caa7280716c387176745639864bae6f33c6f2f35f0ff4fd6559ccdb112072d9/globalmount\"" pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.410509 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.414947 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.420478 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c638bade-936d-4342-8146-527c2cb80373-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.427852 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4nh6\" (UniqueName: \"kubernetes.io/projected/c638bade-936d-4342-8146-527c2cb80373-kube-api-access-w4nh6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.440826 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdtbv\" (UniqueName: \"kubernetes.io/projected/ab5a5a79-99a6-488e-a1d0-68b07c36b62e-kube-api-access-gdtbv\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.461369 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a81cc89e-6d04-4616-8591-5965cfaadaa6\") pod \"ovsdbserver-sb-2\" (UID: \"c638bade-936d-4342-8146-527c2cb80373\") " 
pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.480273 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6875f5a9-1b5f-44ac-9eda-4261ba5b2bff\") pod \"ovsdbserver-sb-0\" (UID: \"ab5a5a79-99a6-488e-a1d0-68b07c36b62e\") " pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.505054 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdm7h\" (UniqueName: \"kubernetes.io/projected/13d20f27-740b-463c-ae05-cae54c02c404-kube-api-access-kdm7h\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.505289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/13d20f27-740b-463c-ae05-cae54c02c404-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.505369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13d20f27-740b-463c-ae05-cae54c02c404-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.505470 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/13d20f27-740b-463c-ae05-cae54c02c404-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.505596 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.505627 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13d20f27-740b-463c-ae05-cae54c02c404-config\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.606760 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13d20f27-740b-463c-ae05-cae54c02c404-config\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.606850 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdm7h\" (UniqueName: \"kubernetes.io/projected/13d20f27-740b-463c-ae05-cae54c02c404-kube-api-access-kdm7h\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.606954 
4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/13d20f27-740b-463c-ae05-cae54c02c404-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.607001 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13d20f27-740b-463c-ae05-cae54c02c404-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.607048 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/13d20f27-740b-463c-ae05-cae54c02c404-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.607144 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.607782 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13d20f27-740b-463c-ae05-cae54c02c404-config\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.609110 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/13d20f27-740b-463c-ae05-cae54c02c404-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.609495 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/13d20f27-740b-463c-ae05-cae54c02c404-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.611586 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
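The reconciler_common.go entries interleaved through this window trace kubelet's volume manager: each SyncLoop ADD populates a desired state of world, a reconciler loop diffs it against the actual state of world, and the gap is closed by issuing VerifyControllerAttachedVolume, MountVolume, and, for deleted pods, UnmountVolume operations (the TearDown and "Volume detached" lines near the top of this window are the other direction of the same loop). A compressed sketch of that pattern; volume, state, and reconcile here are stand-ins, not the real DesiredStateOfWorld/ActualStateOfWorld types:

    package main

    import "fmt"

    // volume identifies one volume for one pod, like the (UniqueName, pod UID)
    // pairs in the entries above.
    type volume struct{ uniqueName, podUID string }

    // state records which volumes are mounted (a toy ActualStateOfWorld when
    // used as the second argument below).
    type state map[volume]bool

    // reconcile runs one pass of the volume-manager pattern: mount whatever is
    // desired but not yet actual, unmount whatever is actual but no longer
    // desired. The real loop hands each step to an asynchronous operation
    // executor, which is why the log shows "started" and "succeeded" as
    // separate entries.
    func reconcile(desired, actual state) {
        for v := range desired {
            if !actual[v] {
                fmt.Printf("operationExecutor.MountVolume started for volume %q pod %q\n",
                    v.uniqueName, v.podUID)
                actual[v] = true // stand-in for the async mount completing
            }
        }
        for v := range actual {
            if !desired[v] {
                fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n",
                    v.uniqueName, v.podUID)
                delete(actual, v)
            }
        }
    }

    func main() {
        actual := state{}
        // A pod arrives via SyncLoop ADD: its volumes become desired, then mounted.
        desired := state{{"configmap/af2877a4-config", "ovsdbserver-nb-1"}: true}
        reconcile(desired, actual)
        // The pod is deleted (compare the community-operators-tdm6q teardown
        // earlier in this log): desired empties and the next pass unmounts.
        reconcile(state{}, actual)
    }

Driving everything off the desired/actual diff, rather than off individual events, is what makes the mounts in this log converge even when pods are added, updated, and deleted concurrently.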
Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.611634 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9e59ddba99614f5fbc4034c00bbb2b98879fc3b89a3663e6c13aa765d904b593/globalmount\"" pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.613706 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13d20f27-740b-463c-ae05-cae54c02c404-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.625417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdm7h\" (UniqueName: \"kubernetes.io/projected/13d20f27-740b-463c-ae05-cae54c02c404-kube-api-access-kdm7h\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.653462 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-93fdf3cc-d580-44aa-af91-833edb0669fd\") pod \"ovsdbserver-sb-1\" (UID: \"13d20f27-740b-463c-ae05-cae54c02c404\") " pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.688864 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:48:55 crc kubenswrapper[4884]: E1128 16:48:55.689078 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.740721 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.758782 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.766927 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.901036 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 16:48:55 crc kubenswrapper[4884]: I1128 16:48:55.992075 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.301948 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.458787 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 16:48:56 crc kubenswrapper[4884]: W1128 16:48:56.473381 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13d20f27_740b_463c_ae05_cae54c02c404.slice/crio-1d42cc30e7e4d4be2c9638b63ae360557d48319acc687ff90d9f2c795676968f WatchSource:0}: Error finding container 1d42cc30e7e4d4be2c9638b63ae360557d48319acc687ff90d9f2c795676968f: Status 404 returned error can't find the container with id 1d42cc30e7e4d4be2c9638b63ae360557d48319acc687ff90d9f2c795676968f Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.682716 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 28 16:48:56 crc kubenswrapper[4884]: W1128 16:48:56.692562 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf2877a4_7652_44cc_a491_21592c862759.slice/crio-542279f36f5cfb4505992c660a449558cffe0182370c1a251adf158e518ba0f6 WatchSource:0}: Error finding container 542279f36f5cfb4505992c660a449558cffe0182370c1a251adf158e518ba0f6: Status 404 returned error can't find the container with id 542279f36f5cfb4505992c660a449558cffe0182370c1a251adf158e518ba0f6 Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.896547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"af2877a4-7652-44cc-a491-21592c862759","Type":"ContainerStarted","Data":"cd847e7906efc3c71ad971ce346bbf028ba74ac288bae037afad2616065cb530"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.897156 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"af2877a4-7652-44cc-a491-21592c862759","Type":"ContainerStarted","Data":"542279f36f5cfb4505992c660a449558cffe0182370c1a251adf158e518ba0f6"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.900475 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"93b03444-4c92-4783-90ba-91fd986a3c55","Type":"ContainerStarted","Data":"26d50a3a89edfe17a22fc28852582db5965f4cda611b0741c13a64f29ee7fa30"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.900616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"93b03444-4c92-4783-90ba-91fd986a3c55","Type":"ContainerStarted","Data":"2726c03d6e95dcb4fafb2973ee4f74221b69ecc767ba17400efb58265d58a4c2"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.900694 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"93b03444-4c92-4783-90ba-91fd986a3c55","Type":"ContainerStarted","Data":"4ef57ad9a6e21f48fa64bc20adf9a4678bc62752de24c85038574b2bda6a15dd"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.902582 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovsdbserver-nb-0" event={"ID":"a5817509-87ff-4139-a4af-c95881825b8a","Type":"ContainerStarted","Data":"6d26b8fdc7669b43c966ef6e28c5e4df2d281961c2c2345ad62e528e5b8ea850"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.902758 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a5817509-87ff-4139-a4af-c95881825b8a","Type":"ContainerStarted","Data":"ba12ac3a5240fc67d1eba9b57a82b4fa8eb747a9a68954a6807291414549b8dd"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.903024 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"a5817509-87ff-4139-a4af-c95881825b8a","Type":"ContainerStarted","Data":"9c67d2e3c34e290ae15c770618b451221eefe9f00363afc4cddfc90659b7453c"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.904256 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"13d20f27-740b-463c-ae05-cae54c02c404","Type":"ContainerStarted","Data":"214e430594687b5c67928141cca57e9946ef5d4efcea2655d30e5337e9e66fc6"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.904357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"13d20f27-740b-463c-ae05-cae54c02c404","Type":"ContainerStarted","Data":"8d1d88efb01ea157cd2e6aefd5ef658462f1982f3f4a89b162c6e18af7444040"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.904416 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"13d20f27-740b-463c-ae05-cae54c02c404","Type":"ContainerStarted","Data":"1d42cc30e7e4d4be2c9638b63ae360557d48319acc687ff90d9f2c795676968f"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.906064 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"c638bade-936d-4342-8146-527c2cb80373","Type":"ContainerStarted","Data":"55a533b0f4160d8ebb44f9b87c80c5cc8a61afba983094c7754631e2d8375209"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.906110 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"c638bade-936d-4342-8146-527c2cb80373","Type":"ContainerStarted","Data":"88a00eaf258bd6fb6a900d6ed35f4a64c04b1191ad8201458139f8c1d4051b07"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.906120 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"c638bade-936d-4342-8146-527c2cb80373","Type":"ContainerStarted","Data":"7edbb74df487d98c53a9b9555c6bfa112d7de6419dfb2de351f6d62a5b8245f4"} Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.919105 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=2.919070671 podStartE2EDuration="2.919070671s" podCreationTimestamp="2025-11-28 16:48:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:48:56.915578456 +0000 UTC m=+5376.478362257" watchObservedRunningTime="2025-11-28 16:48:56.919070671 +0000 UTC m=+5376.481854472" Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.936067 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=2.936042366 podStartE2EDuration="2.936042366s" podCreationTimestamp="2025-11-28 16:48:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2025-11-28 16:48:56.932582162 +0000 UTC m=+5376.495365963" watchObservedRunningTime="2025-11-28 16:48:56.936042366 +0000 UTC m=+5376.498826167" Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.953170 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=2.953130904 podStartE2EDuration="2.953130904s" podCreationTimestamp="2025-11-28 16:48:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:48:56.948593884 +0000 UTC m=+5376.511377705" watchObservedRunningTime="2025-11-28 16:48:56.953130904 +0000 UTC m=+5376.515914705" Nov 28 16:48:56 crc kubenswrapper[4884]: I1128 16:48:56.969369 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.969348432 podStartE2EDuration="3.969348432s" podCreationTimestamp="2025-11-28 16:48:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:48:56.967990808 +0000 UTC m=+5376.530774629" watchObservedRunningTime="2025-11-28 16:48:56.969348432 +0000 UTC m=+5376.532132253" Nov 28 16:48:57 crc kubenswrapper[4884]: W1128 16:48:57.340157 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab5a5a79_99a6_488e_a1d0_68b07c36b62e.slice/crio-d7bb50ccf97af98490ffc6ff5c0d540d662b1d2c3a201c375e6081c4de2b7a4a WatchSource:0}: Error finding container d7bb50ccf97af98490ffc6ff5c0d540d662b1d2c3a201c375e6081c4de2b7a4a: Status 404 returned error can't find the container with id d7bb50ccf97af98490ffc6ff5c0d540d662b1d2c3a201c375e6081c4de2b7a4a Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.346508 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.919280 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"af2877a4-7652-44cc-a491-21592c862759","Type":"ContainerStarted","Data":"eda6ca62f46c3b0d1cd2b99f4581aa470444d030c3c78ea16fb6f5e220f7602c"} Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.921501 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ab5a5a79-99a6-488e-a1d0-68b07c36b62e","Type":"ContainerStarted","Data":"c234e0dfd4ff7362699293c67d19a50aec0a0b77f3668868a202fdac1ef5b3d4"} Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.921532 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ab5a5a79-99a6-488e-a1d0-68b07c36b62e","Type":"ContainerStarted","Data":"7b04f5fd2c700ea1e0a2c348aca4f5ec8bde241d81cf37f091fe14d92f3c43c0"} Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.921544 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"ab5a5a79-99a6-488e-a1d0-68b07c36b62e","Type":"ContainerStarted","Data":"d7bb50ccf97af98490ffc6ff5c0d540d662b1d2c3a201c375e6081c4de2b7a4a"} Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.947641 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.947619441 podStartE2EDuration="3.947619441s" podCreationTimestamp="2025-11-28 16:48:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:48:57.939495222 +0000 UTC m=+5377.502279043" watchObservedRunningTime="2025-11-28 16:48:57.947619441 +0000 UTC m=+5377.510403252" Nov 28 16:48:57 crc kubenswrapper[4884]: I1128 16:48:57.958688 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.958671272 podStartE2EDuration="3.958671272s" podCreationTimestamp="2025-11-28 16:48:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:48:57.954617513 +0000 UTC m=+5377.517401374" watchObservedRunningTime="2025-11-28 16:48:57.958671272 +0000 UTC m=+5377.521455073" Nov 28 16:48:58 crc kubenswrapper[4884]: I1128 16:48:58.341766 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 16:48:58 crc kubenswrapper[4884]: I1128 16:48:58.363749 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Nov 28 16:48:58 crc kubenswrapper[4884]: I1128 16:48:58.386218 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Nov 28 16:48:58 crc kubenswrapper[4884]: I1128 16:48:58.741491 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 16:48:58 crc kubenswrapper[4884]: I1128 16:48:58.758951 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Nov 28 16:48:58 crc kubenswrapper[4884]: I1128 16:48:58.767692 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Nov 28 16:49:00 crc kubenswrapper[4884]: I1128 16:49:00.342235 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 16:49:00 crc kubenswrapper[4884]: I1128 16:49:00.364486 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Nov 28 16:49:00 crc kubenswrapper[4884]: I1128 16:49:00.386704 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Nov 28 16:49:00 crc kubenswrapper[4884]: I1128 16:49:00.741921 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 16:49:00 crc kubenswrapper[4884]: I1128 16:49:00.759632 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Nov 28 16:49:00 crc kubenswrapper[4884]: I1128 16:49:00.767439 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.397880 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.406570 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.426928 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.458955 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.768941 4884 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8578f89889-zpb5k"] Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.770955 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.776103 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.783352 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8578f89889-zpb5k"] Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.810683 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.820389 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.822844 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.879696 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.880063 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.934190 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-ovsdbserver-nb\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.934267 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-dns-svc\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.934614 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx7wb\" (UniqueName: \"kubernetes.io/projected/400b1657-fe42-4359-a73e-e80060109fae-kube-api-access-gx7wb\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.934772 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-config\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.986245 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 16:49:01 crc kubenswrapper[4884]: I1128 16:49:01.989439 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.052377 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-ovsdbserver-nb\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.052455 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-dns-svc\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.052529 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx7wb\" (UniqueName: \"kubernetes.io/projected/400b1657-fe42-4359-a73e-e80060109fae-kube-api-access-gx7wb\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.052607 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-config\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.054412 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-ovsdbserver-nb\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.054478 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-dns-svc\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.055355 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-config\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.075436 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx7wb\" (UniqueName: \"kubernetes.io/projected/400b1657-fe42-4359-a73e-e80060109fae-kube-api-access-gx7wb\") pod \"dnsmasq-dns-8578f89889-zpb5k\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.117487 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.156864 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8578f89889-zpb5k"] Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.189204 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6669754c7-ml5pn"] Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.196311 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.198291 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6669754c7-ml5pn"] Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.203387 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.255730 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-dns-svc\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.256064 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9jmm\" (UniqueName: \"kubernetes.io/projected/569fa6b0-ccc4-491e-bf05-9e9447a52429-kube-api-access-r9jmm\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.256106 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-nb\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.256164 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-sb\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.256202 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-config\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.358124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-sb\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.358193 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-config\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.358235 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-dns-svc\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 
crc kubenswrapper[4884]: I1128 16:49:02.358267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9jmm\" (UniqueName: \"kubernetes.io/projected/569fa6b0-ccc4-491e-bf05-9e9447a52429-kube-api-access-r9jmm\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.358291 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-nb\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.359181 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-sb\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.359215 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-config\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.359412 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-nb\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.359708 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-dns-svc\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.400532 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9jmm\" (UniqueName: \"kubernetes.io/projected/569fa6b0-ccc4-491e-bf05-9e9447a52429-kube-api-access-r9jmm\") pod \"dnsmasq-dns-6669754c7-ml5pn\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.552419 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:02 crc kubenswrapper[4884]: W1128 16:49:02.595468 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod400b1657_fe42_4359_a73e_e80060109fae.slice/crio-35ff59f9cd302a0e3d8d8fc99b7b522a441dcf885ae0e0a6641c0729a8867eb1 WatchSource:0}: Error finding container 35ff59f9cd302a0e3d8d8fc99b7b522a441dcf885ae0e0a6641c0729a8867eb1: Status 404 returned error can't find the container with id 35ff59f9cd302a0e3d8d8fc99b7b522a441dcf885ae0e0a6641c0729a8867eb1 Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.602206 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8578f89889-zpb5k"] Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.960200 4884 generic.go:334] "Generic (PLEG): container finished" podID="400b1657-fe42-4359-a73e-e80060109fae" containerID="09d143645d7a90140ad869200e833470db81adb16f06d926b4805e3aba2cda65" exitCode=0 Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.960311 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" event={"ID":"400b1657-fe42-4359-a73e-e80060109fae","Type":"ContainerDied","Data":"09d143645d7a90140ad869200e833470db81adb16f06d926b4805e3aba2cda65"} Nov 28 16:49:02 crc kubenswrapper[4884]: I1128 16:49:02.960531 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" event={"ID":"400b1657-fe42-4359-a73e-e80060109fae","Type":"ContainerStarted","Data":"35ff59f9cd302a0e3d8d8fc99b7b522a441dcf885ae0e0a6641c0729a8867eb1"} Nov 28 16:49:03 crc kubenswrapper[4884]: W1128 16:49:03.017071 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod569fa6b0_ccc4_491e_bf05_9e9447a52429.slice/crio-c06b52b67c3be49519de186840f14bf631a9cca04bf84afa2c435058fd365a94 WatchSource:0}: Error finding container c06b52b67c3be49519de186840f14bf631a9cca04bf84afa2c435058fd365a94: Status 404 returned error can't find the container with id c06b52b67c3be49519de186840f14bf631a9cca04bf84afa2c435058fd365a94 Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.022587 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6669754c7-ml5pn"] Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.262756 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.371255 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-dns-svc\") pod \"400b1657-fe42-4359-a73e-e80060109fae\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.371319 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-ovsdbserver-nb\") pod \"400b1657-fe42-4359-a73e-e80060109fae\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.371534 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-config\") pod \"400b1657-fe42-4359-a73e-e80060109fae\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.371609 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gx7wb\" (UniqueName: \"kubernetes.io/projected/400b1657-fe42-4359-a73e-e80060109fae-kube-api-access-gx7wb\") pod \"400b1657-fe42-4359-a73e-e80060109fae\" (UID: \"400b1657-fe42-4359-a73e-e80060109fae\") " Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.374812 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/400b1657-fe42-4359-a73e-e80060109fae-kube-api-access-gx7wb" (OuterVolumeSpecName: "kube-api-access-gx7wb") pod "400b1657-fe42-4359-a73e-e80060109fae" (UID: "400b1657-fe42-4359-a73e-e80060109fae"). InnerVolumeSpecName "kube-api-access-gx7wb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.389628 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-config" (OuterVolumeSpecName: "config") pod "400b1657-fe42-4359-a73e-e80060109fae" (UID: "400b1657-fe42-4359-a73e-e80060109fae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.394154 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "400b1657-fe42-4359-a73e-e80060109fae" (UID: "400b1657-fe42-4359-a73e-e80060109fae"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.395634 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "400b1657-fe42-4359-a73e-e80060109fae" (UID: "400b1657-fe42-4359-a73e-e80060109fae"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.472993 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.473029 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gx7wb\" (UniqueName: \"kubernetes.io/projected/400b1657-fe42-4359-a73e-e80060109fae-kube-api-access-gx7wb\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.473039 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.473048 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/400b1657-fe42-4359-a73e-e80060109fae-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.974792 4884 generic.go:334] "Generic (PLEG): container finished" podID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerID="f5e4d6b272f322b4cc40694984b9ba179a9bc9ef2a3831b53d793ae596d67a8b" exitCode=0 Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.974860 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" event={"ID":"569fa6b0-ccc4-491e-bf05-9e9447a52429","Type":"ContainerDied","Data":"f5e4d6b272f322b4cc40694984b9ba179a9bc9ef2a3831b53d793ae596d67a8b"} Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.974887 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" event={"ID":"569fa6b0-ccc4-491e-bf05-9e9447a52429","Type":"ContainerStarted","Data":"c06b52b67c3be49519de186840f14bf631a9cca04bf84afa2c435058fd365a94"} Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.976360 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" event={"ID":"400b1657-fe42-4359-a73e-e80060109fae","Type":"ContainerDied","Data":"35ff59f9cd302a0e3d8d8fc99b7b522a441dcf885ae0e0a6641c0729a8867eb1"} Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.976404 4884 scope.go:117] "RemoveContainer" containerID="09d143645d7a90140ad869200e833470db81adb16f06d926b4805e3aba2cda65" Nov 28 16:49:03 crc kubenswrapper[4884]: I1128 16:49:03.976463 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8578f89889-zpb5k" Nov 28 16:49:04 crc kubenswrapper[4884]: I1128 16:49:04.054461 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8578f89889-zpb5k"] Nov 28 16:49:04 crc kubenswrapper[4884]: I1128 16:49:04.060691 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8578f89889-zpb5k"] Nov 28 16:49:04 crc kubenswrapper[4884]: I1128 16:49:04.705767 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="400b1657-fe42-4359-a73e-e80060109fae" path="/var/lib/kubelet/pods/400b1657-fe42-4359-a73e-e80060109fae/volumes" Nov 28 16:49:05 crc kubenswrapper[4884]: I1128 16:49:05.822816 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 16:49:07 crc kubenswrapper[4884]: I1128 16:49:07.011641 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" event={"ID":"569fa6b0-ccc4-491e-bf05-9e9447a52429","Type":"ContainerStarted","Data":"768cd946d195b086b527d9cb598392ceccd0270745f82dc10160d86ba39211de"} Nov 28 16:49:07 crc kubenswrapper[4884]: I1128 16:49:07.014050 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:07 crc kubenswrapper[4884]: I1128 16:49:07.687881 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:49:07 crc kubenswrapper[4884]: E1128 16:49:07.688248 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.319964 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" podStartSLOduration=6.319943934 podStartE2EDuration="6.319943934s" podCreationTimestamp="2025-11-28 16:49:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:07.034322623 +0000 UTC m=+5386.597106444" watchObservedRunningTime="2025-11-28 16:49:08.319943934 +0000 UTC m=+5387.882727735" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.326894 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Nov 28 16:49:08 crc kubenswrapper[4884]: E1128 16:49:08.327317 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="400b1657-fe42-4359-a73e-e80060109fae" containerName="init" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.327336 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="400b1657-fe42-4359-a73e-e80060109fae" containerName="init" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.327494 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="400b1657-fe42-4359-a73e-e80060109fae" containerName="init" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.327998 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.331028 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.337694 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.461298 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnf98\" (UniqueName: \"kubernetes.io/projected/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-kube-api-access-gnf98\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.461442 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.461522 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.563519 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnf98\" (UniqueName: \"kubernetes.io/projected/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-kube-api-access-gnf98\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.563594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.563646 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data" Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.566724 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.566763 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3a7b802d2db3d5a9df0cb169b1b9d2891b7d1edc737794907da5e81f44d445ca/globalmount\"" pod="openstack/ovn-copy-data"
Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.583869 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data"
Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.594217 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnf98\" (UniqueName: \"kubernetes.io/projected/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-kube-api-access-gnf98\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data"
Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.600464 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") pod \"ovn-copy-data\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " pod="openstack/ovn-copy-data"
Nov 28 16:49:08 crc kubenswrapper[4884]: I1128 16:49:08.654765 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Nov 28 16:49:09 crc kubenswrapper[4884]: I1128 16:49:09.174622 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"]
Nov 28 16:49:10 crc kubenswrapper[4884]: I1128 16:49:10.041362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff","Type":"ContainerStarted","Data":"27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb"}
Nov 28 16:49:10 crc kubenswrapper[4884]: I1128 16:49:10.041672 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff","Type":"ContainerStarted","Data":"c839eb4eac4babf27d9ec7b3786f17b896839bb683f15a029a7b8607413fe0c9"}
Nov 28 16:49:10 crc kubenswrapper[4884]: I1128 16:49:10.065263 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.065239883 podStartE2EDuration="3.065239883s" podCreationTimestamp="2025-11-28 16:49:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:10.06307558 +0000 UTC m=+5389.625859401" watchObservedRunningTime="2025-11-28 16:49:10.065239883 +0000 UTC m=+5389.628023704"
Nov 28 16:49:12 crc kubenswrapper[4884]: I1128 16:49:12.554045 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6669754c7-ml5pn"
Nov 28 16:49:12 crc kubenswrapper[4884]: I1128 16:49:12.647155 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-cbph4"]
Nov 28 16:49:12 crc kubenswrapper[4884]: I1128 16:49:12.647433 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerName="dnsmasq-dns" containerID="cri-o://40376a42be5cc3a62b91ddf5068ec0b08458201dea3305d383598b436418a960" gracePeriod=10
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.070738 4884 generic.go:334] "Generic (PLEG): container finished" podID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerID="40376a42be5cc3a62b91ddf5068ec0b08458201dea3305d383598b436418a960" exitCode=0
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.070791 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" event={"ID":"4d5471a8-72a3-4a6c-a70d-12daf2c94973","Type":"ContainerDied","Data":"40376a42be5cc3a62b91ddf5068ec0b08458201dea3305d383598b436418a960"}
Nov 28 16:49:13 crc kubenswrapper[4884]: E1128 16:49:13.394969 4884 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.189:52858->38.102.83.189:45559: write tcp 38.102.83.189:52858->38.102.83.189:45559: write: broken pipe
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.638385 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4"
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.758049 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-config\") pod \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") "
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.758336 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-dns-svc\") pod \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") "
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.758517 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6w22\" (UniqueName: \"kubernetes.io/projected/4d5471a8-72a3-4a6c-a70d-12daf2c94973-kube-api-access-s6w22\") pod \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\" (UID: \"4d5471a8-72a3-4a6c-a70d-12daf2c94973\") "
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.764177 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d5471a8-72a3-4a6c-a70d-12daf2c94973-kube-api-access-s6w22" (OuterVolumeSpecName: "kube-api-access-s6w22") pod "4d5471a8-72a3-4a6c-a70d-12daf2c94973" (UID: "4d5471a8-72a3-4a6c-a70d-12daf2c94973"). InnerVolumeSpecName "kube-api-access-s6w22". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.794673 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4d5471a8-72a3-4a6c-a70d-12daf2c94973" (UID: "4d5471a8-72a3-4a6c-a70d-12daf2c94973"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.801813 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-config" (OuterVolumeSpecName: "config") pod "4d5471a8-72a3-4a6c-a70d-12daf2c94973" (UID: "4d5471a8-72a3-4a6c-a70d-12daf2c94973"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.860632 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.860658 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6w22\" (UniqueName: \"kubernetes.io/projected/4d5471a8-72a3-4a6c-a70d-12daf2c94973-kube-api-access-s6w22\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:13 crc kubenswrapper[4884]: I1128 16:49:13.860668 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d5471a8-72a3-4a6c-a70d-12daf2c94973-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.089606 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" event={"ID":"4d5471a8-72a3-4a6c-a70d-12daf2c94973","Type":"ContainerDied","Data":"13bec95d5200dfb8e660cc92671f6fbb63107626f8c5f7d02b0db44007ba206b"} Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.089902 4884 scope.go:117] "RemoveContainer" containerID="40376a42be5cc3a62b91ddf5068ec0b08458201dea3305d383598b436418a960" Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.090446 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-cbph4" Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.125732 4884 scope.go:117] "RemoveContainer" containerID="a21b8a5949b0143932059b176c951f3338bb15184632f84e54d23e026b52482c" Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.143839 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-cbph4"] Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.153339 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-cbph4"] Nov 28 16:49:14 crc kubenswrapper[4884]: I1128 16:49:14.706349 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" path="/var/lib/kubelet/pods/4d5471a8-72a3-4a6c-a70d-12daf2c94973/volumes" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.487163 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:49:15 crc kubenswrapper[4884]: E1128 16:49:15.487808 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerName="init" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.487823 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerName="init" Nov 28 16:49:15 crc kubenswrapper[4884]: E1128 16:49:15.487842 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerName="dnsmasq-dns" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.487849 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerName="dnsmasq-dns" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.488017 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d5471a8-72a3-4a6c-a70d-12daf2c94973" containerName="dnsmasq-dns" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.488879 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.491829 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.492458 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.495651 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-kq7zn" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.499552 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.589668 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/adc27ec0-61ce-4045-98ff-894f7bf14067-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.589722 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adc27ec0-61ce-4045-98ff-894f7bf14067-config\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.589766 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adc27ec0-61ce-4045-98ff-894f7bf14067-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.589813 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct8w2\" (UniqueName: \"kubernetes.io/projected/adc27ec0-61ce-4045-98ff-894f7bf14067-kube-api-access-ct8w2\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.589947 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adc27ec0-61ce-4045-98ff-894f7bf14067-scripts\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.692078 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/adc27ec0-61ce-4045-98ff-894f7bf14067-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.692173 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adc27ec0-61ce-4045-98ff-894f7bf14067-config\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.692215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adc27ec0-61ce-4045-98ff-894f7bf14067-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: 
\"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.692265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct8w2\" (UniqueName: \"kubernetes.io/projected/adc27ec0-61ce-4045-98ff-894f7bf14067-kube-api-access-ct8w2\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.692306 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adc27ec0-61ce-4045-98ff-894f7bf14067-scripts\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.693464 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/adc27ec0-61ce-4045-98ff-894f7bf14067-config\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.693629 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/adc27ec0-61ce-4045-98ff-894f7bf14067-scripts\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.693799 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/adc27ec0-61ce-4045-98ff-894f7bf14067-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.699043 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adc27ec0-61ce-4045-98ff-894f7bf14067-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.721115 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct8w2\" (UniqueName: \"kubernetes.io/projected/adc27ec0-61ce-4045-98ff-894f7bf14067-kube-api-access-ct8w2\") pod \"ovn-northd-0\" (UID: \"adc27ec0-61ce-4045-98ff-894f7bf14067\") " pod="openstack/ovn-northd-0" Nov 28 16:49:15 crc kubenswrapper[4884]: I1128 16:49:15.808675 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 16:49:16 crc kubenswrapper[4884]: I1128 16:49:16.245070 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 16:49:17 crc kubenswrapper[4884]: I1128 16:49:17.116635 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"adc27ec0-61ce-4045-98ff-894f7bf14067","Type":"ContainerStarted","Data":"5e425ca5b820f678ad7588fc4b6861f7186025609fdb69951c15e41221232a0c"} Nov 28 16:49:17 crc kubenswrapper[4884]: I1128 16:49:17.116858 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"adc27ec0-61ce-4045-98ff-894f7bf14067","Type":"ContainerStarted","Data":"9133a27d0d2adb7700a8d43e0b126f971b36e292e9f82541ce2192c8c7a207a8"} Nov 28 16:49:17 crc kubenswrapper[4884]: I1128 16:49:17.116870 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"adc27ec0-61ce-4045-98ff-894f7bf14067","Type":"ContainerStarted","Data":"aef04fc25e5f2e959be8d298d2906b4d4a1c65ded82044341f9c75fcb54f6fc2"} Nov 28 16:49:17 crc kubenswrapper[4884]: I1128 16:49:17.116885 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 16:49:17 crc kubenswrapper[4884]: I1128 16:49:17.148064 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.148031647 podStartE2EDuration="2.148031647s" podCreationTimestamp="2025-11-28 16:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:17.139495888 +0000 UTC m=+5396.702279739" watchObservedRunningTime="2025-11-28 16:49:17.148031647 +0000 UTC m=+5396.710815488" Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.598498 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-v29hs"] Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.600618 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.609395 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-v29hs"] Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.679469 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn49g\" (UniqueName: \"kubernetes.io/projected/aa482a01-e2e1-4d61-a6a9-d09030c4bee4-kube-api-access-kn49g\") pod \"keystone-db-create-v29hs\" (UID: \"aa482a01-e2e1-4d61-a6a9-d09030c4bee4\") " pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.781589 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn49g\" (UniqueName: \"kubernetes.io/projected/aa482a01-e2e1-4d61-a6a9-d09030c4bee4-kube-api-access-kn49g\") pod \"keystone-db-create-v29hs\" (UID: \"aa482a01-e2e1-4d61-a6a9-d09030c4bee4\") " pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.808119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn49g\" (UniqueName: \"kubernetes.io/projected/aa482a01-e2e1-4d61-a6a9-d09030c4bee4-kube-api-access-kn49g\") pod \"keystone-db-create-v29hs\" (UID: \"aa482a01-e2e1-4d61-a6a9-d09030c4bee4\") " pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:20 crc kubenswrapper[4884]: I1128 16:49:20.933590 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:21 crc kubenswrapper[4884]: I1128 16:49:21.362984 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-v29hs"] Nov 28 16:49:21 crc kubenswrapper[4884]: W1128 16:49:21.378605 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa482a01_e2e1_4d61_a6a9_d09030c4bee4.slice/crio-f9ef095438459e517633d6406427825b8fa33733c1eb9053cb445035bac9045d WatchSource:0}: Error finding container f9ef095438459e517633d6406427825b8fa33733c1eb9053cb445035bac9045d: Status 404 returned error can't find the container with id f9ef095438459e517633d6406427825b8fa33733c1eb9053cb445035bac9045d Nov 28 16:49:22 crc kubenswrapper[4884]: I1128 16:49:22.171933 4884 generic.go:334] "Generic (PLEG): container finished" podID="aa482a01-e2e1-4d61-a6a9-d09030c4bee4" containerID="8bf0d67d1f98131a4193859b2c17646e91a8bfd176f2fb315a6861840a0d09fb" exitCode=0 Nov 28 16:49:22 crc kubenswrapper[4884]: I1128 16:49:22.172026 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-v29hs" event={"ID":"aa482a01-e2e1-4d61-a6a9-d09030c4bee4","Type":"ContainerDied","Data":"8bf0d67d1f98131a4193859b2c17646e91a8bfd176f2fb315a6861840a0d09fb"} Nov 28 16:49:22 crc kubenswrapper[4884]: I1128 16:49:22.172297 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-v29hs" event={"ID":"aa482a01-e2e1-4d61-a6a9-d09030c4bee4","Type":"ContainerStarted","Data":"f9ef095438459e517633d6406427825b8fa33733c1eb9053cb445035bac9045d"} Nov 28 16:49:22 crc kubenswrapper[4884]: I1128 16:49:22.689956 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:49:23 crc kubenswrapper[4884]: I1128 16:49:23.180508 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"48de67cda51bf73805c75e12b81816f10effd891069b56fd2588291f4c432eee"} Nov 28 16:49:23 crc kubenswrapper[4884]: I1128 16:49:23.512295 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:23 crc kubenswrapper[4884]: I1128 16:49:23.634929 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn49g\" (UniqueName: \"kubernetes.io/projected/aa482a01-e2e1-4d61-a6a9-d09030c4bee4-kube-api-access-kn49g\") pod \"aa482a01-e2e1-4d61-a6a9-d09030c4bee4\" (UID: \"aa482a01-e2e1-4d61-a6a9-d09030c4bee4\") " Nov 28 16:49:23 crc kubenswrapper[4884]: I1128 16:49:23.641987 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa482a01-e2e1-4d61-a6a9-d09030c4bee4-kube-api-access-kn49g" (OuterVolumeSpecName: "kube-api-access-kn49g") pod "aa482a01-e2e1-4d61-a6a9-d09030c4bee4" (UID: "aa482a01-e2e1-4d61-a6a9-d09030c4bee4"). InnerVolumeSpecName "kube-api-access-kn49g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:23 crc kubenswrapper[4884]: I1128 16:49:23.737635 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn49g\" (UniqueName: \"kubernetes.io/projected/aa482a01-e2e1-4d61-a6a9-d09030c4bee4-kube-api-access-kn49g\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:24 crc kubenswrapper[4884]: I1128 16:49:24.188558 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-v29hs" event={"ID":"aa482a01-e2e1-4d61-a6a9-d09030c4bee4","Type":"ContainerDied","Data":"f9ef095438459e517633d6406427825b8fa33733c1eb9053cb445035bac9045d"} Nov 28 16:49:24 crc kubenswrapper[4884]: I1128 16:49:24.188598 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9ef095438459e517633d6406427825b8fa33733c1eb9053cb445035bac9045d" Nov 28 16:49:24 crc kubenswrapper[4884]: I1128 16:49:24.188623 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-v29hs" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.603161 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cda9-account-create-bddqz"] Nov 28 16:49:30 crc kubenswrapper[4884]: E1128 16:49:30.604048 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa482a01-e2e1-4d61-a6a9-d09030c4bee4" containerName="mariadb-database-create" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.604064 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa482a01-e2e1-4d61-a6a9-d09030c4bee4" containerName="mariadb-database-create" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.604249 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa482a01-e2e1-4d61-a6a9-d09030c4bee4" containerName="mariadb-database-create" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.604764 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.607670 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.615803 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cda9-account-create-bddqz"] Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.764355 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdzrb\" (UniqueName: \"kubernetes.io/projected/572b831f-b7ee-4e63-b872-601d5deae7a9-kube-api-access-fdzrb\") pod \"keystone-cda9-account-create-bddqz\" (UID: \"572b831f-b7ee-4e63-b872-601d5deae7a9\") " pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.866019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdzrb\" (UniqueName: \"kubernetes.io/projected/572b831f-b7ee-4e63-b872-601d5deae7a9-kube-api-access-fdzrb\") pod \"keystone-cda9-account-create-bddqz\" (UID: \"572b831f-b7ee-4e63-b872-601d5deae7a9\") " pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.866985 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.886241 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdzrb\" (UniqueName: \"kubernetes.io/projected/572b831f-b7ee-4e63-b872-601d5deae7a9-kube-api-access-fdzrb\") pod \"keystone-cda9-account-create-bddqz\" (UID: \"572b831f-b7ee-4e63-b872-601d5deae7a9\") " pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:30 crc kubenswrapper[4884]: I1128 16:49:30.938827 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:31 crc kubenswrapper[4884]: I1128 16:49:31.441437 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cda9-account-create-bddqz"] Nov 28 16:49:32 crc kubenswrapper[4884]: I1128 16:49:32.251429 4884 generic.go:334] "Generic (PLEG): container finished" podID="572b831f-b7ee-4e63-b872-601d5deae7a9" containerID="d1908efe993064e9e087d0b2c11cf0dc2368fef0dbac6fba4aab53ec4ac36a99" exitCode=0 Nov 28 16:49:32 crc kubenswrapper[4884]: I1128 16:49:32.251496 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cda9-account-create-bddqz" event={"ID":"572b831f-b7ee-4e63-b872-601d5deae7a9","Type":"ContainerDied","Data":"d1908efe993064e9e087d0b2c11cf0dc2368fef0dbac6fba4aab53ec4ac36a99"} Nov 28 16:49:32 crc kubenswrapper[4884]: I1128 16:49:32.251817 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cda9-account-create-bddqz" event={"ID":"572b831f-b7ee-4e63-b872-601d5deae7a9","Type":"ContainerStarted","Data":"c4322083bbd8e117efd329f55aec480471d69939640401402cba30fad7406634"} Nov 28 16:49:33 crc kubenswrapper[4884]: I1128 16:49:33.617775 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:33 crc kubenswrapper[4884]: I1128 16:49:33.709545 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdzrb\" (UniqueName: \"kubernetes.io/projected/572b831f-b7ee-4e63-b872-601d5deae7a9-kube-api-access-fdzrb\") pod \"572b831f-b7ee-4e63-b872-601d5deae7a9\" (UID: \"572b831f-b7ee-4e63-b872-601d5deae7a9\") " Nov 28 16:49:33 crc kubenswrapper[4884]: I1128 16:49:33.715221 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/572b831f-b7ee-4e63-b872-601d5deae7a9-kube-api-access-fdzrb" (OuterVolumeSpecName: "kube-api-access-fdzrb") pod "572b831f-b7ee-4e63-b872-601d5deae7a9" (UID: "572b831f-b7ee-4e63-b872-601d5deae7a9"). InnerVolumeSpecName "kube-api-access-fdzrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:33 crc kubenswrapper[4884]: I1128 16:49:33.811724 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdzrb\" (UniqueName: \"kubernetes.io/projected/572b831f-b7ee-4e63-b872-601d5deae7a9-kube-api-access-fdzrb\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:34 crc kubenswrapper[4884]: I1128 16:49:34.275621 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cda9-account-create-bddqz" event={"ID":"572b831f-b7ee-4e63-b872-601d5deae7a9","Type":"ContainerDied","Data":"c4322083bbd8e117efd329f55aec480471d69939640401402cba30fad7406634"} Nov 28 16:49:34 crc kubenswrapper[4884]: I1128 16:49:34.275661 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4322083bbd8e117efd329f55aec480471d69939640401402cba30fad7406634" Nov 28 16:49:34 crc kubenswrapper[4884]: I1128 16:49:34.275726 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cda9-account-create-bddqz" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.065859 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-86px5"] Nov 28 16:49:36 crc kubenswrapper[4884]: E1128 16:49:36.066568 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="572b831f-b7ee-4e63-b872-601d5deae7a9" containerName="mariadb-account-create" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.066583 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="572b831f-b7ee-4e63-b872-601d5deae7a9" containerName="mariadb-account-create" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.066778 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="572b831f-b7ee-4e63-b872-601d5deae7a9" containerName="mariadb-account-create" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.067473 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.069344 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.069920 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.070638 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.070922 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hql4x" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.082025 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-86px5"] Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.150109 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-combined-ca-bundle\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.150160 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-config-data\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.150466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hb9dt\" (UniqueName: \"kubernetes.io/projected/fad256f3-aa0a-4981-b19d-606902f94277-kube-api-access-hb9dt\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.252367 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hb9dt\" (UniqueName: \"kubernetes.io/projected/fad256f3-aa0a-4981-b19d-606902f94277-kube-api-access-hb9dt\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.252477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-combined-ca-bundle\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.252505 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-config-data\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.260983 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-config-data\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " 
pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.261057 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-combined-ca-bundle\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.271337 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hb9dt\" (UniqueName: \"kubernetes.io/projected/fad256f3-aa0a-4981-b19d-606902f94277-kube-api-access-hb9dt\") pod \"keystone-db-sync-86px5\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.384083 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:36 crc kubenswrapper[4884]: I1128 16:49:36.891336 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-86px5"] Nov 28 16:49:36 crc kubenswrapper[4884]: W1128 16:49:36.897209 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfad256f3_aa0a_4981_b19d_606902f94277.slice/crio-a3b5fd18a9548350741c923643cac8247326afc0de5a255ab42e75c84ada77e5 WatchSource:0}: Error finding container a3b5fd18a9548350741c923643cac8247326afc0de5a255ab42e75c84ada77e5: Status 404 returned error can't find the container with id a3b5fd18a9548350741c923643cac8247326afc0de5a255ab42e75c84ada77e5 Nov 28 16:49:37 crc kubenswrapper[4884]: I1128 16:49:37.303983 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-86px5" event={"ID":"fad256f3-aa0a-4981-b19d-606902f94277","Type":"ContainerStarted","Data":"5d4850d5fa26952a7ae433ccb47a014d8022bacda2faa956cf7977b908eded76"} Nov 28 16:49:37 crc kubenswrapper[4884]: I1128 16:49:37.304158 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-86px5" event={"ID":"fad256f3-aa0a-4981-b19d-606902f94277","Type":"ContainerStarted","Data":"a3b5fd18a9548350741c923643cac8247326afc0de5a255ab42e75c84ada77e5"} Nov 28 16:49:37 crc kubenswrapper[4884]: I1128 16:49:37.340017 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-86px5" podStartSLOduration=1.339986659 podStartE2EDuration="1.339986659s" podCreationTimestamp="2025-11-28 16:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:37.330859026 +0000 UTC m=+5416.893642867" watchObservedRunningTime="2025-11-28 16:49:37.339986659 +0000 UTC m=+5416.902770490" Nov 28 16:49:39 crc kubenswrapper[4884]: I1128 16:49:39.337054 4884 generic.go:334] "Generic (PLEG): container finished" podID="fad256f3-aa0a-4981-b19d-606902f94277" containerID="5d4850d5fa26952a7ae433ccb47a014d8022bacda2faa956cf7977b908eded76" exitCode=0 Nov 28 16:49:39 crc kubenswrapper[4884]: I1128 16:49:39.337924 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-86px5" event={"ID":"fad256f3-aa0a-4981-b19d-606902f94277","Type":"ContainerDied","Data":"5d4850d5fa26952a7ae433ccb47a014d8022bacda2faa956cf7977b908eded76"} Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.769439 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.842852 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-config-data\") pod \"fad256f3-aa0a-4981-b19d-606902f94277\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.843047 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-combined-ca-bundle\") pod \"fad256f3-aa0a-4981-b19d-606902f94277\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.843141 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hb9dt\" (UniqueName: \"kubernetes.io/projected/fad256f3-aa0a-4981-b19d-606902f94277-kube-api-access-hb9dt\") pod \"fad256f3-aa0a-4981-b19d-606902f94277\" (UID: \"fad256f3-aa0a-4981-b19d-606902f94277\") " Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.848060 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad256f3-aa0a-4981-b19d-606902f94277-kube-api-access-hb9dt" (OuterVolumeSpecName: "kube-api-access-hb9dt") pod "fad256f3-aa0a-4981-b19d-606902f94277" (UID: "fad256f3-aa0a-4981-b19d-606902f94277"). InnerVolumeSpecName "kube-api-access-hb9dt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.878108 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fad256f3-aa0a-4981-b19d-606902f94277" (UID: "fad256f3-aa0a-4981-b19d-606902f94277"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.888848 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-config-data" (OuterVolumeSpecName: "config-data") pod "fad256f3-aa0a-4981-b19d-606902f94277" (UID: "fad256f3-aa0a-4981-b19d-606902f94277"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.945562 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.945605 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad256f3-aa0a-4981-b19d-606902f94277-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:40 crc kubenswrapper[4884]: I1128 16:49:40.945619 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hb9dt\" (UniqueName: \"kubernetes.io/projected/fad256f3-aa0a-4981-b19d-606902f94277-kube-api-access-hb9dt\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.364076 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-86px5" event={"ID":"fad256f3-aa0a-4981-b19d-606902f94277","Type":"ContainerDied","Data":"a3b5fd18a9548350741c923643cac8247326afc0de5a255ab42e75c84ada77e5"} Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.364138 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3b5fd18a9548350741c923643cac8247326afc0de5a255ab42e75c84ada77e5" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.364212 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-86px5" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.629246 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bd5644bf7-k7wjn"] Nov 28 16:49:41 crc kubenswrapper[4884]: E1128 16:49:41.631230 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad256f3-aa0a-4981-b19d-606902f94277" containerName="keystone-db-sync" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.631256 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad256f3-aa0a-4981-b19d-606902f94277" containerName="keystone-db-sync" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.631729 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad256f3-aa0a-4981-b19d-606902f94277" containerName="keystone-db-sync" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.633109 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.650222 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bd5644bf7-k7wjn"] Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.689980 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-kzcfm"] Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.694438 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.696280 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-kzcfm"] Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.706416 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.706702 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.706827 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.706926 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hql4x" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767177 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-sb\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767212 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8dmv\" (UniqueName: \"kubernetes.io/projected/846c0625-e92e-4fce-99c3-ad5934ac0c60-kube-api-access-r8dmv\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767232 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-credential-keys\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-nb\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767340 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-config-data\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767360 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7x4q\" (UniqueName: \"kubernetes.io/projected/6eb61175-b271-4204-b1d3-cf58130995c3-kube-api-access-x7x4q\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767390 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-scripts\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767406 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-fernet-keys\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767421 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-combined-ca-bundle\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767453 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-dns-svc\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.767472 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-config\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.868778 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-dns-svc\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.868834 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-config\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.868885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-sb\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.868912 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8dmv\" (UniqueName: \"kubernetes.io/projected/846c0625-e92e-4fce-99c3-ad5934ac0c60-kube-api-access-r8dmv\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.868932 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-credential-keys\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.869006 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-nb\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.869040 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-config-data\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.869067 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7x4q\" (UniqueName: \"kubernetes.io/projected/6eb61175-b271-4204-b1d3-cf58130995c3-kube-api-access-x7x4q\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.870253 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-dns-svc\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.870304 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-combined-ca-bundle\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.870330 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-scripts\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.870349 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-fernet-keys\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.872430 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-sb\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.873033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-config\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " 
pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.874928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-nb\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.893135 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-credential-keys\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.893695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-config-data\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.894626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-fernet-keys\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.903392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-scripts\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.905672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-combined-ca-bundle\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.915694 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7x4q\" (UniqueName: \"kubernetes.io/projected/6eb61175-b271-4204-b1d3-cf58130995c3-kube-api-access-x7x4q\") pod \"keystone-bootstrap-kzcfm\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.918740 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8dmv\" (UniqueName: \"kubernetes.io/projected/846c0625-e92e-4fce-99c3-ad5934ac0c60-kube-api-access-r8dmv\") pod \"dnsmasq-dns-bd5644bf7-k7wjn\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:41 crc kubenswrapper[4884]: I1128 16:49:41.953530 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:42 crc kubenswrapper[4884]: I1128 16:49:42.020985 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:42 crc kubenswrapper[4884]: I1128 16:49:42.370320 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bd5644bf7-k7wjn"] Nov 28 16:49:42 crc kubenswrapper[4884]: I1128 16:49:42.503895 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-kzcfm"] Nov 28 16:49:42 crc kubenswrapper[4884]: W1128 16:49:42.505893 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6eb61175_b271_4204_b1d3_cf58130995c3.slice/crio-9240839b160f0fc1d7408b92644d692aff05b9516e8d4d6a7b75dad7fbfebb54 WatchSource:0}: Error finding container 9240839b160f0fc1d7408b92644d692aff05b9516e8d4d6a7b75dad7fbfebb54: Status 404 returned error can't find the container with id 9240839b160f0fc1d7408b92644d692aff05b9516e8d4d6a7b75dad7fbfebb54 Nov 28 16:49:43 crc kubenswrapper[4884]: I1128 16:49:43.389062 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kzcfm" event={"ID":"6eb61175-b271-4204-b1d3-cf58130995c3","Type":"ContainerStarted","Data":"22ca90449ac7e3e109d2317d7964d9e10af47081ba9818148188551185b92805"} Nov 28 16:49:43 crc kubenswrapper[4884]: I1128 16:49:43.389434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kzcfm" event={"ID":"6eb61175-b271-4204-b1d3-cf58130995c3","Type":"ContainerStarted","Data":"9240839b160f0fc1d7408b92644d692aff05b9516e8d4d6a7b75dad7fbfebb54"} Nov 28 16:49:43 crc kubenswrapper[4884]: I1128 16:49:43.392862 4884 generic.go:334] "Generic (PLEG): container finished" podID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerID="7d05fd7524e52b3931bcc01ee5fc100e3c595b1dbf02ff0a784d21d2270c94dd" exitCode=0 Nov 28 16:49:43 crc kubenswrapper[4884]: I1128 16:49:43.392912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" event={"ID":"846c0625-e92e-4fce-99c3-ad5934ac0c60","Type":"ContainerDied","Data":"7d05fd7524e52b3931bcc01ee5fc100e3c595b1dbf02ff0a784d21d2270c94dd"} Nov 28 16:49:43 crc kubenswrapper[4884]: I1128 16:49:43.392952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" event={"ID":"846c0625-e92e-4fce-99c3-ad5934ac0c60","Type":"ContainerStarted","Data":"057aa9c76a3fca2daa1fcd0be6416e6407d9e278d6bc8d3930863a262c9919ee"} Nov 28 16:49:43 crc kubenswrapper[4884]: I1128 16:49:43.455809 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-kzcfm" podStartSLOduration=2.45578797 podStartE2EDuration="2.45578797s" podCreationTimestamp="2025-11-28 16:49:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:43.405307034 +0000 UTC m=+5422.968090875" watchObservedRunningTime="2025-11-28 16:49:43.45578797 +0000 UTC m=+5423.018571771" Nov 28 16:49:44 crc kubenswrapper[4884]: I1128 16:49:44.406306 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" event={"ID":"846c0625-e92e-4fce-99c3-ad5934ac0c60","Type":"ContainerStarted","Data":"e161aa2acb46e092ccf896af782644661e79c90b937e1fd680da7162138ebdd2"} Nov 28 16:49:44 crc kubenswrapper[4884]: I1128 16:49:44.406750 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:46 crc kubenswrapper[4884]: I1128 16:49:46.423225 4884 
generic.go:334] "Generic (PLEG): container finished" podID="6eb61175-b271-4204-b1d3-cf58130995c3" containerID="22ca90449ac7e3e109d2317d7964d9e10af47081ba9818148188551185b92805" exitCode=0 Nov 28 16:49:46 crc kubenswrapper[4884]: I1128 16:49:46.423284 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kzcfm" event={"ID":"6eb61175-b271-4204-b1d3-cf58130995c3","Type":"ContainerDied","Data":"22ca90449ac7e3e109d2317d7964d9e10af47081ba9818148188551185b92805"} Nov 28 16:49:46 crc kubenswrapper[4884]: I1128 16:49:46.439853 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" podStartSLOduration=5.439835313 podStartE2EDuration="5.439835313s" podCreationTimestamp="2025-11-28 16:49:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:44.438253132 +0000 UTC m=+5424.001036963" watchObservedRunningTime="2025-11-28 16:49:46.439835313 +0000 UTC m=+5426.002619114" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.796345 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.881507 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-credential-keys\") pod \"6eb61175-b271-4204-b1d3-cf58130995c3\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.881579 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-scripts\") pod \"6eb61175-b271-4204-b1d3-cf58130995c3\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.881604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-fernet-keys\") pod \"6eb61175-b271-4204-b1d3-cf58130995c3\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.881637 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-config-data\") pod \"6eb61175-b271-4204-b1d3-cf58130995c3\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.881710 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-combined-ca-bundle\") pod \"6eb61175-b271-4204-b1d3-cf58130995c3\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.881780 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7x4q\" (UniqueName: \"kubernetes.io/projected/6eb61175-b271-4204-b1d3-cf58130995c3-kube-api-access-x7x4q\") pod \"6eb61175-b271-4204-b1d3-cf58130995c3\" (UID: \"6eb61175-b271-4204-b1d3-cf58130995c3\") " Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.887791 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6eb61175-b271-4204-b1d3-cf58130995c3" (UID: "6eb61175-b271-4204-b1d3-cf58130995c3"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.888192 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6eb61175-b271-4204-b1d3-cf58130995c3" (UID: "6eb61175-b271-4204-b1d3-cf58130995c3"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.888412 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eb61175-b271-4204-b1d3-cf58130995c3-kube-api-access-x7x4q" (OuterVolumeSpecName: "kube-api-access-x7x4q") pod "6eb61175-b271-4204-b1d3-cf58130995c3" (UID: "6eb61175-b271-4204-b1d3-cf58130995c3"). InnerVolumeSpecName "kube-api-access-x7x4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.892316 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-scripts" (OuterVolumeSpecName: "scripts") pod "6eb61175-b271-4204-b1d3-cf58130995c3" (UID: "6eb61175-b271-4204-b1d3-cf58130995c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.906656 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6eb61175-b271-4204-b1d3-cf58130995c3" (UID: "6eb61175-b271-4204-b1d3-cf58130995c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.908735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-config-data" (OuterVolumeSpecName: "config-data") pod "6eb61175-b271-4204-b1d3-cf58130995c3" (UID: "6eb61175-b271-4204-b1d3-cf58130995c3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.984183 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.984456 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.984544 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7x4q\" (UniqueName: \"kubernetes.io/projected/6eb61175-b271-4204-b1d3-cf58130995c3-kube-api-access-x7x4q\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.984632 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.984763 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:47 crc kubenswrapper[4884]: I1128 16:49:47.984849 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6eb61175-b271-4204-b1d3-cf58130995c3-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.444064 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kzcfm" event={"ID":"6eb61175-b271-4204-b1d3-cf58130995c3","Type":"ContainerDied","Data":"9240839b160f0fc1d7408b92644d692aff05b9516e8d4d6a7b75dad7fbfebb54"} Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.444119 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9240839b160f0fc1d7408b92644d692aff05b9516e8d4d6a7b75dad7fbfebb54" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.444175 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-kzcfm" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.513253 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-kzcfm"] Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.520910 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-kzcfm"] Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.602378 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-cgz8w"] Nov 28 16:49:48 crc kubenswrapper[4884]: E1128 16:49:48.602721 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eb61175-b271-4204-b1d3-cf58130995c3" containerName="keystone-bootstrap" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.602743 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eb61175-b271-4204-b1d3-cf58130995c3" containerName="keystone-bootstrap" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.602955 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eb61175-b271-4204-b1d3-cf58130995c3" containerName="keystone-bootstrap" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.603528 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.605844 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.605884 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.605891 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.607228 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hql4x" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.619009 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-cgz8w"] Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.694416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-combined-ca-bundle\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.694690 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-scripts\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.694809 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-credential-keys\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.694837 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g87hx\" (UniqueName: 
\"kubernetes.io/projected/10baaa35-44ec-4071-b281-56d439579fdd-kube-api-access-g87hx\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.694864 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-config-data\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.694880 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-fernet-keys\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.697069 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6eb61175-b271-4204-b1d3-cf58130995c3" path="/var/lib/kubelet/pods/6eb61175-b271-4204-b1d3-cf58130995c3/volumes" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.796312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-credential-keys\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.796561 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g87hx\" (UniqueName: \"kubernetes.io/projected/10baaa35-44ec-4071-b281-56d439579fdd-kube-api-access-g87hx\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.796665 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-config-data\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.796762 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-fernet-keys\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.796885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-combined-ca-bundle\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.797807 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-scripts\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: 
I1128 16:49:48.802960 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-scripts\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.804652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-combined-ca-bundle\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.806538 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-fernet-keys\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.806761 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-config-data\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.809360 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-credential-keys\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.826971 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g87hx\" (UniqueName: \"kubernetes.io/projected/10baaa35-44ec-4071-b281-56d439579fdd-kube-api-access-g87hx\") pod \"keystone-bootstrap-cgz8w\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:48 crc kubenswrapper[4884]: I1128 16:49:48.962779 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:49 crc kubenswrapper[4884]: I1128 16:49:49.405976 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-cgz8w"] Nov 28 16:49:49 crc kubenswrapper[4884]: I1128 16:49:49.454844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cgz8w" event={"ID":"10baaa35-44ec-4071-b281-56d439579fdd","Type":"ContainerStarted","Data":"afe8de1664a9219205c479635116013114610b3caf7ca3a213b4bbd6b8b6e8ff"} Nov 28 16:49:50 crc kubenswrapper[4884]: I1128 16:49:50.465349 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cgz8w" event={"ID":"10baaa35-44ec-4071-b281-56d439579fdd","Type":"ContainerStarted","Data":"d58c8275d5e5c8c84259e6341fe42c69a9879918ff5a0eaff1135ef448e0409f"} Nov 28 16:49:50 crc kubenswrapper[4884]: I1128 16:49:50.491234 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-cgz8w" podStartSLOduration=2.4912111550000002 podStartE2EDuration="2.491211155s" podCreationTimestamp="2025-11-28 16:49:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:50.485802712 +0000 UTC m=+5430.048586523" watchObservedRunningTime="2025-11-28 16:49:50.491211155 +0000 UTC m=+5430.053994956" Nov 28 16:49:51 crc kubenswrapper[4884]: I1128 16:49:51.955605 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.017680 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6669754c7-ml5pn"] Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.017930 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerName="dnsmasq-dns" containerID="cri-o://768cd946d195b086b527d9cb598392ceccd0270745f82dc10160d86ba39211de" gracePeriod=10 Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.484135 4884 generic.go:334] "Generic (PLEG): container finished" podID="10baaa35-44ec-4071-b281-56d439579fdd" containerID="d58c8275d5e5c8c84259e6341fe42c69a9879918ff5a0eaff1135ef448e0409f" exitCode=0 Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.484344 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cgz8w" event={"ID":"10baaa35-44ec-4071-b281-56d439579fdd","Type":"ContainerDied","Data":"d58c8275d5e5c8c84259e6341fe42c69a9879918ff5a0eaff1135ef448e0409f"} Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.489424 4884 generic.go:334] "Generic (PLEG): container finished" podID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerID="768cd946d195b086b527d9cb598392ceccd0270745f82dc10160d86ba39211de" exitCode=0 Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.489487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" event={"ID":"569fa6b0-ccc4-491e-bf05-9e9447a52429","Type":"ContainerDied","Data":"768cd946d195b086b527d9cb598392ceccd0270745f82dc10160d86ba39211de"} Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.489510 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" 
event={"ID":"569fa6b0-ccc4-491e-bf05-9e9447a52429","Type":"ContainerDied","Data":"c06b52b67c3be49519de186840f14bf631a9cca04bf84afa2c435058fd365a94"} Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.489520 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c06b52b67c3be49519de186840f14bf631a9cca04bf84afa2c435058fd365a94" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.519652 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.589941 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-config\") pod \"569fa6b0-ccc4-491e-bf05-9e9447a52429\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.590040 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-dns-svc\") pod \"569fa6b0-ccc4-491e-bf05-9e9447a52429\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.590195 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9jmm\" (UniqueName: \"kubernetes.io/projected/569fa6b0-ccc4-491e-bf05-9e9447a52429-kube-api-access-r9jmm\") pod \"569fa6b0-ccc4-491e-bf05-9e9447a52429\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.590222 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-nb\") pod \"569fa6b0-ccc4-491e-bf05-9e9447a52429\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.590974 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-sb\") pod \"569fa6b0-ccc4-491e-bf05-9e9447a52429\" (UID: \"569fa6b0-ccc4-491e-bf05-9e9447a52429\") " Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.597707 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/569fa6b0-ccc4-491e-bf05-9e9447a52429-kube-api-access-r9jmm" (OuterVolumeSpecName: "kube-api-access-r9jmm") pod "569fa6b0-ccc4-491e-bf05-9e9447a52429" (UID: "569fa6b0-ccc4-491e-bf05-9e9447a52429"). InnerVolumeSpecName "kube-api-access-r9jmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.631954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "569fa6b0-ccc4-491e-bf05-9e9447a52429" (UID: "569fa6b0-ccc4-491e-bf05-9e9447a52429"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.633650 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-config" (OuterVolumeSpecName: "config") pod "569fa6b0-ccc4-491e-bf05-9e9447a52429" (UID: "569fa6b0-ccc4-491e-bf05-9e9447a52429"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.639913 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "569fa6b0-ccc4-491e-bf05-9e9447a52429" (UID: "569fa6b0-ccc4-491e-bf05-9e9447a52429"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.646908 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "569fa6b0-ccc4-491e-bf05-9e9447a52429" (UID: "569fa6b0-ccc4-491e-bf05-9e9447a52429"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.693348 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.693380 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.693390 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9jmm\" (UniqueName: \"kubernetes.io/projected/569fa6b0-ccc4-491e-bf05-9e9447a52429-kube-api-access-r9jmm\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.693400 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:52 crc kubenswrapper[4884]: I1128 16:49:52.693408 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/569fa6b0-ccc4-491e-bf05-9e9447a52429-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.498286 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6669754c7-ml5pn" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.534951 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6669754c7-ml5pn"] Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.544055 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6669754c7-ml5pn"] Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.783980 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.919778 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-combined-ca-bundle\") pod \"10baaa35-44ec-4071-b281-56d439579fdd\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.919862 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-fernet-keys\") pod \"10baaa35-44ec-4071-b281-56d439579fdd\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.919901 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-credential-keys\") pod \"10baaa35-44ec-4071-b281-56d439579fdd\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.919959 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g87hx\" (UniqueName: \"kubernetes.io/projected/10baaa35-44ec-4071-b281-56d439579fdd-kube-api-access-g87hx\") pod \"10baaa35-44ec-4071-b281-56d439579fdd\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.920004 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-config-data\") pod \"10baaa35-44ec-4071-b281-56d439579fdd\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.920082 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-scripts\") pod \"10baaa35-44ec-4071-b281-56d439579fdd\" (UID: \"10baaa35-44ec-4071-b281-56d439579fdd\") " Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.926275 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "10baaa35-44ec-4071-b281-56d439579fdd" (UID: "10baaa35-44ec-4071-b281-56d439579fdd"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.926409 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-scripts" (OuterVolumeSpecName: "scripts") pod "10baaa35-44ec-4071-b281-56d439579fdd" (UID: "10baaa35-44ec-4071-b281-56d439579fdd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.926440 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "10baaa35-44ec-4071-b281-56d439579fdd" (UID: "10baaa35-44ec-4071-b281-56d439579fdd"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.927812 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10baaa35-44ec-4071-b281-56d439579fdd-kube-api-access-g87hx" (OuterVolumeSpecName: "kube-api-access-g87hx") pod "10baaa35-44ec-4071-b281-56d439579fdd" (UID: "10baaa35-44ec-4071-b281-56d439579fdd"). InnerVolumeSpecName "kube-api-access-g87hx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.967716 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10baaa35-44ec-4071-b281-56d439579fdd" (UID: "10baaa35-44ec-4071-b281-56d439579fdd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:53 crc kubenswrapper[4884]: I1128 16:49:53.975027 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-config-data" (OuterVolumeSpecName: "config-data") pod "10baaa35-44ec-4071-b281-56d439579fdd" (UID: "10baaa35-44ec-4071-b281-56d439579fdd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.022257 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.022295 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.022308 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.022322 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.022334 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g87hx\" (UniqueName: \"kubernetes.io/projected/10baaa35-44ec-4071-b281-56d439579fdd-kube-api-access-g87hx\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.022344 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10baaa35-44ec-4071-b281-56d439579fdd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.511314 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-cgz8w" event={"ID":"10baaa35-44ec-4071-b281-56d439579fdd","Type":"ContainerDied","Data":"afe8de1664a9219205c479635116013114610b3caf7ca3a213b4bbd6b8b6e8ff"} Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.511572 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afe8de1664a9219205c479635116013114610b3caf7ca3a213b4bbd6b8b6e8ff" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 
16:49:54.511376 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-cgz8w" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.605294 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8588877946-hnncc"] Nov 28 16:49:54 crc kubenswrapper[4884]: E1128 16:49:54.605653 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerName="init" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.605673 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerName="init" Nov 28 16:49:54 crc kubenswrapper[4884]: E1128 16:49:54.605697 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerName="dnsmasq-dns" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.605706 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerName="dnsmasq-dns" Nov 28 16:49:54 crc kubenswrapper[4884]: E1128 16:49:54.605732 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10baaa35-44ec-4071-b281-56d439579fdd" containerName="keystone-bootstrap" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.605739 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="10baaa35-44ec-4071-b281-56d439579fdd" containerName="keystone-bootstrap" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.605895 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" containerName="dnsmasq-dns" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.605904 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="10baaa35-44ec-4071-b281-56d439579fdd" containerName="keystone-bootstrap" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.606527 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.609132 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.613741 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.613761 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-hql4x" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.614160 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.625456 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8588877946-hnncc"] Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.697237 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="569fa6b0-ccc4-491e-bf05-9e9447a52429" path="/var/lib/kubelet/pods/569fa6b0-ccc4-491e-bf05-9e9447a52429/volumes" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.747263 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-fernet-keys\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.747331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-combined-ca-bundle\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.747368 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-credential-keys\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.747732 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-config-data\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.747869 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-scripts\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.747923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2t4b4\" (UniqueName: \"kubernetes.io/projected/bfccd8a8-1ad7-4c08-85ae-b826a0309318-kube-api-access-2t4b4\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc 
kubenswrapper[4884]: I1128 16:49:54.849019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-config-data\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.849138 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-scripts\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.849167 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2t4b4\" (UniqueName: \"kubernetes.io/projected/bfccd8a8-1ad7-4c08-85ae-b826a0309318-kube-api-access-2t4b4\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.849200 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-fernet-keys\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.849231 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-combined-ca-bundle\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.849296 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-credential-keys\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.854965 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-combined-ca-bundle\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.854975 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-fernet-keys\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.856025 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-config-data\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.858558 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-credential-keys\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.858716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfccd8a8-1ad7-4c08-85ae-b826a0309318-scripts\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.869663 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2t4b4\" (UniqueName: \"kubernetes.io/projected/bfccd8a8-1ad7-4c08-85ae-b826a0309318-kube-api-access-2t4b4\") pod \"keystone-8588877946-hnncc\" (UID: \"bfccd8a8-1ad7-4c08-85ae-b826a0309318\") " pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:54 crc kubenswrapper[4884]: I1128 16:49:54.946131 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:55 crc kubenswrapper[4884]: I1128 16:49:55.173204 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8588877946-hnncc"] Nov 28 16:49:55 crc kubenswrapper[4884]: W1128 16:49:55.186694 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfccd8a8_1ad7_4c08_85ae_b826a0309318.slice/crio-00c311fb293bdc9240f4075f684f7fd4308459bedccc4c1085cb013cca68dc4e WatchSource:0}: Error finding container 00c311fb293bdc9240f4075f684f7fd4308459bedccc4c1085cb013cca68dc4e: Status 404 returned error can't find the container with id 00c311fb293bdc9240f4075f684f7fd4308459bedccc4c1085cb013cca68dc4e Nov 28 16:49:55 crc kubenswrapper[4884]: I1128 16:49:55.519413 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8588877946-hnncc" event={"ID":"bfccd8a8-1ad7-4c08-85ae-b826a0309318","Type":"ContainerStarted","Data":"1197f4bc1adc613cdd0d7fbad1845f96d6082b49f10474853af3507ab203aced"} Nov 28 16:49:55 crc kubenswrapper[4884]: I1128 16:49:55.519728 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8588877946-hnncc" event={"ID":"bfccd8a8-1ad7-4c08-85ae-b826a0309318","Type":"ContainerStarted","Data":"00c311fb293bdc9240f4075f684f7fd4308459bedccc4c1085cb013cca68dc4e"} Nov 28 16:49:55 crc kubenswrapper[4884]: I1128 16:49:55.519858 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-8588877946-hnncc" Nov 28 16:49:55 crc kubenswrapper[4884]: I1128 16:49:55.535429 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-8588877946-hnncc" podStartSLOduration=1.5354107830000001 podStartE2EDuration="1.535410783s" podCreationTimestamp="2025-11-28 16:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:49:55.53326909 +0000 UTC m=+5435.096052891" watchObservedRunningTime="2025-11-28 16:49:55.535410783 +0000 UTC m=+5435.098194584" Nov 28 16:50:26 crc kubenswrapper[4884]: I1128 16:50:26.572544 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-8588877946-hnncc" Nov 28 16:50:29 crc kubenswrapper[4884]: I1128 16:50:29.500409 4884 scope.go:117] "RemoveContainer" 
containerID="412f6073d42f28ed9ba04594734f40f801bc5cb19bcc11b567a9fadf61e163ac" Nov 28 16:50:29 crc kubenswrapper[4884]: I1128 16:50:29.528722 4884 scope.go:117] "RemoveContainer" containerID="17423c1cd46a1da152bb91106b9cec01a1d8b43a02a6861f0b5054d6f22d3574" Nov 28 16:50:29 crc kubenswrapper[4884]: I1128 16:50:29.597395 4884 scope.go:117] "RemoveContainer" containerID="f179e1434cfb151c75c1805f811f29fb70b5cced41f1dc52764a6e7cffbc1ea5" Nov 28 16:50:29 crc kubenswrapper[4884]: I1128 16:50:29.620024 4884 scope.go:117] "RemoveContainer" containerID="73c47078a07737ca8138397208886f8fb6add579c2d45eec3d0b4f356df1c0eb" Nov 28 16:50:29 crc kubenswrapper[4884]: I1128 16:50:29.652428 4884 scope.go:117] "RemoveContainer" containerID="1976f4104086b69cdd0b99e5be0cab7df2f6c84304c2156f73732e15f9235f68" Nov 28 16:50:29 crc kubenswrapper[4884]: I1128 16:50:29.696139 4884 scope.go:117] "RemoveContainer" containerID="c0ca10b0cf5fafc66a0acfe38de6b08006d77ab173bd243c4cf1d504db70b7d7" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.651077 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.652999 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.655252 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.658895 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.659306 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-n47d7" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.669785 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.789329 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config-secret\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.789389 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5lqz\" (UniqueName: \"kubernetes.io/projected/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-kube-api-access-n5lqz\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.789444 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.891034 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5lqz\" (UniqueName: \"kubernetes.io/projected/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-kube-api-access-n5lqz\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc 
kubenswrapper[4884]: I1128 16:50:30.891126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.891254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config-secret\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.892041 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.897581 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config-secret\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.907431 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5lqz\" (UniqueName: \"kubernetes.io/projected/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-kube-api-access-n5lqz\") pod \"openstackclient\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " pod="openstack/openstackclient" Nov 28 16:50:30 crc kubenswrapper[4884]: I1128 16:50:30.972943 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 16:50:31 crc kubenswrapper[4884]: I1128 16:50:31.395165 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 16:50:31 crc kubenswrapper[4884]: W1128 16:50:31.400446 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfdccee80_ab8b_44a4_999d_e6cc64d2cc98.slice/crio-dac73258073d79adffa0ccc7631a24c40091d88eff32173c0d9680bf4d75cea5 WatchSource:0}: Error finding container dac73258073d79adffa0ccc7631a24c40091d88eff32173c0d9680bf4d75cea5: Status 404 returned error can't find the container with id dac73258073d79adffa0ccc7631a24c40091d88eff32173c0d9680bf4d75cea5 Nov 28 16:50:31 crc kubenswrapper[4884]: I1128 16:50:31.917828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"fdccee80-ab8b-44a4-999d-e6cc64d2cc98","Type":"ContainerStarted","Data":"08a07c4deb7f3e926d336ec4afa1ce7ca389b2feafe1d993207d59d1de597054"} Nov 28 16:50:31 crc kubenswrapper[4884]: I1128 16:50:31.918367 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"fdccee80-ab8b-44a4-999d-e6cc64d2cc98","Type":"ContainerStarted","Data":"dac73258073d79adffa0ccc7631a24c40091d88eff32173c0d9680bf4d75cea5"} Nov 28 16:50:31 crc kubenswrapper[4884]: I1128 16:50:31.944191 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.944171342 podStartE2EDuration="1.944171342s" podCreationTimestamp="2025-11-28 16:50:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:50:31.936641648 +0000 UTC m=+5471.499425489" watchObservedRunningTime="2025-11-28 16:50:31.944171342 +0000 UTC m=+5471.506955143" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.096015 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ws5pb"] Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.098955 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.106761 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ws5pb"] Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.240938 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-utilities\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.241019 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbj4l\" (UniqueName: \"kubernetes.io/projected/5ce9f9a9-6713-4659-848b-a9040c68ca63-kube-api-access-vbj4l\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.241115 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-catalog-content\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.342567 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-catalog-content\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.342644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-utilities\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.342710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbj4l\" (UniqueName: \"kubernetes.io/projected/5ce9f9a9-6713-4659-848b-a9040c68ca63-kube-api-access-vbj4l\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.343163 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-catalog-content\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.343189 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-utilities\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.361410 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vbj4l\" (UniqueName: \"kubernetes.io/projected/5ce9f9a9-6713-4659-848b-a9040c68ca63-kube-api-access-vbj4l\") pod \"redhat-operators-ws5pb\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.440414 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:44 crc kubenswrapper[4884]: I1128 16:50:44.860254 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ws5pb"] Nov 28 16:50:44 crc kubenswrapper[4884]: W1128 16:50:44.862761 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ce9f9a9_6713_4659_848b_a9040c68ca63.slice/crio-9016d9e0b435fc2da796930dc65921de211757fa5dfb4823ff8bb336b9be2a5f WatchSource:0}: Error finding container 9016d9e0b435fc2da796930dc65921de211757fa5dfb4823ff8bb336b9be2a5f: Status 404 returned error can't find the container with id 9016d9e0b435fc2da796930dc65921de211757fa5dfb4823ff8bb336b9be2a5f Nov 28 16:50:45 crc kubenswrapper[4884]: I1128 16:50:45.046685 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerStarted","Data":"bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea"} Nov 28 16:50:45 crc kubenswrapper[4884]: I1128 16:50:45.047111 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerStarted","Data":"9016d9e0b435fc2da796930dc65921de211757fa5dfb4823ff8bb336b9be2a5f"} Nov 28 16:50:46 crc kubenswrapper[4884]: I1128 16:50:46.062465 4884 generic.go:334] "Generic (PLEG): container finished" podID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerID="bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea" exitCode=0 Nov 28 16:50:46 crc kubenswrapper[4884]: I1128 16:50:46.062521 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerDied","Data":"bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea"} Nov 28 16:50:46 crc kubenswrapper[4884]: I1128 16:50:46.067376 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:50:48 crc kubenswrapper[4884]: I1128 16:50:48.085907 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerStarted","Data":"9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a"} Nov 28 16:50:49 crc kubenswrapper[4884]: I1128 16:50:49.093565 4884 generic.go:334] "Generic (PLEG): container finished" podID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerID="9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a" exitCode=0 Nov 28 16:50:49 crc kubenswrapper[4884]: I1128 16:50:49.093624 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerDied","Data":"9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a"} Nov 28 16:50:50 crc kubenswrapper[4884]: I1128 16:50:50.107885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerStarted","Data":"3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16"} Nov 28 16:50:50 crc kubenswrapper[4884]: I1128 16:50:50.143915 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ws5pb" podStartSLOduration=2.6562400950000002 podStartE2EDuration="6.143886383s" podCreationTimestamp="2025-11-28 16:50:44 +0000 UTC" firstStartedPulling="2025-11-28 16:50:46.066799091 +0000 UTC m=+5485.629582922" lastFinishedPulling="2025-11-28 16:50:49.554445419 +0000 UTC m=+5489.117229210" observedRunningTime="2025-11-28 16:50:50.127078482 +0000 UTC m=+5489.689862303" watchObservedRunningTime="2025-11-28 16:50:50.143886383 +0000 UTC m=+5489.706670224" Nov 28 16:50:54 crc kubenswrapper[4884]: I1128 16:50:54.444018 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:54 crc kubenswrapper[4884]: I1128 16:50:54.444626 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:50:55 crc kubenswrapper[4884]: I1128 16:50:55.488607 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ws5pb" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="registry-server" probeResult="failure" output=< Nov 28 16:50:55 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 16:50:55 crc kubenswrapper[4884]: > Nov 28 16:51:04 crc kubenswrapper[4884]: I1128 16:51:04.497452 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:51:04 crc kubenswrapper[4884]: I1128 16:51:04.542134 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:51:04 crc kubenswrapper[4884]: I1128 16:51:04.733761 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ws5pb"] Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.268521 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ws5pb" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="registry-server" containerID="cri-o://3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16" gracePeriod=2 Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.771031 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.831171 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbj4l\" (UniqueName: \"kubernetes.io/projected/5ce9f9a9-6713-4659-848b-a9040c68ca63-kube-api-access-vbj4l\") pod \"5ce9f9a9-6713-4659-848b-a9040c68ca63\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.831298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-utilities\") pod \"5ce9f9a9-6713-4659-848b-a9040c68ca63\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.831409 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-catalog-content\") pod \"5ce9f9a9-6713-4659-848b-a9040c68ca63\" (UID: \"5ce9f9a9-6713-4659-848b-a9040c68ca63\") " Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.832937 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-utilities" (OuterVolumeSpecName: "utilities") pod "5ce9f9a9-6713-4659-848b-a9040c68ca63" (UID: "5ce9f9a9-6713-4659-848b-a9040c68ca63"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.845487 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ce9f9a9-6713-4659-848b-a9040c68ca63-kube-api-access-vbj4l" (OuterVolumeSpecName: "kube-api-access-vbj4l") pod "5ce9f9a9-6713-4659-848b-a9040c68ca63" (UID: "5ce9f9a9-6713-4659-848b-a9040c68ca63"). InnerVolumeSpecName "kube-api-access-vbj4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.933769 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.934363 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbj4l\" (UniqueName: \"kubernetes.io/projected/5ce9f9a9-6713-4659-848b-a9040c68ca63-kube-api-access-vbj4l\") on node \"crc\" DevicePath \"\"" Nov 28 16:51:06 crc kubenswrapper[4884]: I1128 16:51:06.961644 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ce9f9a9-6713-4659-848b-a9040c68ca63" (UID: "5ce9f9a9-6713-4659-848b-a9040c68ca63"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.037035 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ce9f9a9-6713-4659-848b-a9040c68ca63-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.280975 4884 generic.go:334] "Generic (PLEG): container finished" podID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerID="3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16" exitCode=0 Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.281028 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerDied","Data":"3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16"} Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.281047 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ws5pb" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.281073 4884 scope.go:117] "RemoveContainer" containerID="3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.281058 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ws5pb" event={"ID":"5ce9f9a9-6713-4659-848b-a9040c68ca63","Type":"ContainerDied","Data":"9016d9e0b435fc2da796930dc65921de211757fa5dfb4823ff8bb336b9be2a5f"} Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.306760 4884 scope.go:117] "RemoveContainer" containerID="9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.324312 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ws5pb"] Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.331823 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ws5pb"] Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.363298 4884 scope.go:117] "RemoveContainer" containerID="bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.393718 4884 scope.go:117] "RemoveContainer" containerID="3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16" Nov 28 16:51:07 crc kubenswrapper[4884]: E1128 16:51:07.394736 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16\": container with ID starting with 3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16 not found: ID does not exist" containerID="3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.394784 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16"} err="failed to get container status \"3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16\": rpc error: code = NotFound desc = could not find container \"3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16\": container with ID starting with 3649cc2f3bbf8797af6c02690d4580d3e678c75fac61cd45685495ee669c6d16 not found: ID does not exist" Nov 28 16:51:07 crc 
kubenswrapper[4884]: I1128 16:51:07.394814 4884 scope.go:117] "RemoveContainer" containerID="9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a" Nov 28 16:51:07 crc kubenswrapper[4884]: E1128 16:51:07.395178 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a\": container with ID starting with 9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a not found: ID does not exist" containerID="9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.395215 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a"} err="failed to get container status \"9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a\": rpc error: code = NotFound desc = could not find container \"9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a\": container with ID starting with 9db7e182bf68827e63262a093db88338dad1a37277330b654714b57deb44b25a not found: ID does not exist" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.395237 4884 scope.go:117] "RemoveContainer" containerID="bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea" Nov 28 16:51:07 crc kubenswrapper[4884]: E1128 16:51:07.395570 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea\": container with ID starting with bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea not found: ID does not exist" containerID="bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea" Nov 28 16:51:07 crc kubenswrapper[4884]: I1128 16:51:07.395610 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea"} err="failed to get container status \"bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea\": rpc error: code = NotFound desc = could not find container \"bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea\": container with ID starting with bee0bf5a85bbfcbb195787dc91d06d00e4eeab576edb4872d51f38b0cb9bc3ea not found: ID does not exist" Nov 28 16:51:08 crc kubenswrapper[4884]: I1128 16:51:08.698724 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" path="/var/lib/kubelet/pods/5ce9f9a9-6713-4659-848b-a9040c68ca63/volumes" Nov 28 16:51:51 crc kubenswrapper[4884]: I1128 16:51:51.243440 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:51:51 crc kubenswrapper[4884]: I1128 16:51:51.243876 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.207395 4884 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/barbican-db-create-k9vr2"] Nov 28 16:52:09 crc kubenswrapper[4884]: E1128 16:52:09.208238 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="registry-server" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.208252 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="registry-server" Nov 28 16:52:09 crc kubenswrapper[4884]: E1128 16:52:09.208271 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="extract-content" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.208279 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="extract-content" Nov 28 16:52:09 crc kubenswrapper[4884]: E1128 16:52:09.208299 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="extract-utilities" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.208306 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="extract-utilities" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.208502 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ce9f9a9-6713-4659-848b-a9040c68ca63" containerName="registry-server" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.209079 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.221034 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-k9vr2"] Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.308160 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqw44\" (UniqueName: \"kubernetes.io/projected/86c08ed3-aae0-4c31-8c10-182e580b68e0-kube-api-access-vqw44\") pod \"barbican-db-create-k9vr2\" (UID: \"86c08ed3-aae0-4c31-8c10-182e580b68e0\") " pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.409841 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqw44\" (UniqueName: \"kubernetes.io/projected/86c08ed3-aae0-4c31-8c10-182e580b68e0-kube-api-access-vqw44\") pod \"barbican-db-create-k9vr2\" (UID: \"86c08ed3-aae0-4c31-8c10-182e580b68e0\") " pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.438140 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqw44\" (UniqueName: \"kubernetes.io/projected/86c08ed3-aae0-4c31-8c10-182e580b68e0-kube-api-access-vqw44\") pod \"barbican-db-create-k9vr2\" (UID: \"86c08ed3-aae0-4c31-8c10-182e580b68e0\") " pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.531124 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:09 crc kubenswrapper[4884]: I1128 16:52:09.990637 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-k9vr2"] Nov 28 16:52:09 crc kubenswrapper[4884]: W1128 16:52:09.996976 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86c08ed3_aae0_4c31_8c10_182e580b68e0.slice/crio-40bd957bbaca8c271a7b85f111885c5cfc846e5444b135b13de0eeb04227bf0b WatchSource:0}: Error finding container 40bd957bbaca8c271a7b85f111885c5cfc846e5444b135b13de0eeb04227bf0b: Status 404 returned error can't find the container with id 40bd957bbaca8c271a7b85f111885c5cfc846e5444b135b13de0eeb04227bf0b Nov 28 16:52:10 crc kubenswrapper[4884]: I1128 16:52:10.850631 4884 generic.go:334] "Generic (PLEG): container finished" podID="86c08ed3-aae0-4c31-8c10-182e580b68e0" containerID="bb04380908c36683bd8fdfd47caf564a33f756299a4e4e720d4f56dc994517e6" exitCode=0 Nov 28 16:52:10 crc kubenswrapper[4884]: I1128 16:52:10.850703 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k9vr2" event={"ID":"86c08ed3-aae0-4c31-8c10-182e580b68e0","Type":"ContainerDied","Data":"bb04380908c36683bd8fdfd47caf564a33f756299a4e4e720d4f56dc994517e6"} Nov 28 16:52:10 crc kubenswrapper[4884]: I1128 16:52:10.850940 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k9vr2" event={"ID":"86c08ed3-aae0-4c31-8c10-182e580b68e0","Type":"ContainerStarted","Data":"40bd957bbaca8c271a7b85f111885c5cfc846e5444b135b13de0eeb04227bf0b"} Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.149425 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.251774 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqw44\" (UniqueName: \"kubernetes.io/projected/86c08ed3-aae0-4c31-8c10-182e580b68e0-kube-api-access-vqw44\") pod \"86c08ed3-aae0-4c31-8c10-182e580b68e0\" (UID: \"86c08ed3-aae0-4c31-8c10-182e580b68e0\") " Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.257784 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86c08ed3-aae0-4c31-8c10-182e580b68e0-kube-api-access-vqw44" (OuterVolumeSpecName: "kube-api-access-vqw44") pod "86c08ed3-aae0-4c31-8c10-182e580b68e0" (UID: "86c08ed3-aae0-4c31-8c10-182e580b68e0"). InnerVolumeSpecName "kube-api-access-vqw44". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.353471 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqw44\" (UniqueName: \"kubernetes.io/projected/86c08ed3-aae0-4c31-8c10-182e580b68e0-kube-api-access-vqw44\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.868135 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k9vr2" event={"ID":"86c08ed3-aae0-4c31-8c10-182e580b68e0","Type":"ContainerDied","Data":"40bd957bbaca8c271a7b85f111885c5cfc846e5444b135b13de0eeb04227bf0b"} Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.868170 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40bd957bbaca8c271a7b85f111885c5cfc846e5444b135b13de0eeb04227bf0b" Nov 28 16:52:12 crc kubenswrapper[4884]: I1128 16:52:12.868192 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k9vr2" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.208145 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-7cc3-account-create-glf7r"] Nov 28 16:52:19 crc kubenswrapper[4884]: E1128 16:52:19.209134 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86c08ed3-aae0-4c31-8c10-182e580b68e0" containerName="mariadb-database-create" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.209151 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="86c08ed3-aae0-4c31-8c10-182e580b68e0" containerName="mariadb-database-create" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.209361 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="86c08ed3-aae0-4c31-8c10-182e580b68e0" containerName="mariadb-database-create" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.210027 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.216373 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7cc3-account-create-glf7r"] Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.257453 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.368585 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdt4g\" (UniqueName: \"kubernetes.io/projected/814ec177-1088-4a0f-bda9-ee71c9cfa6eb-kube-api-access-sdt4g\") pod \"barbican-7cc3-account-create-glf7r\" (UID: \"814ec177-1088-4a0f-bda9-ee71c9cfa6eb\") " pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.470416 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdt4g\" (UniqueName: \"kubernetes.io/projected/814ec177-1088-4a0f-bda9-ee71c9cfa6eb-kube-api-access-sdt4g\") pod \"barbican-7cc3-account-create-glf7r\" (UID: \"814ec177-1088-4a0f-bda9-ee71c9cfa6eb\") " pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.489639 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdt4g\" (UniqueName: \"kubernetes.io/projected/814ec177-1088-4a0f-bda9-ee71c9cfa6eb-kube-api-access-sdt4g\") pod \"barbican-7cc3-account-create-glf7r\" (UID: \"814ec177-1088-4a0f-bda9-ee71c9cfa6eb\") " pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:19 crc kubenswrapper[4884]: I1128 16:52:19.579336 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:20 crc kubenswrapper[4884]: I1128 16:52:20.177808 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-7cc3-account-create-glf7r"] Nov 28 16:52:20 crc kubenswrapper[4884]: I1128 16:52:20.933793 4884 generic.go:334] "Generic (PLEG): container finished" podID="814ec177-1088-4a0f-bda9-ee71c9cfa6eb" containerID="70e1168ea111bc97ffeb74f058eb86f3defc92c7b998be4b97f2a0a4a1243a28" exitCode=0 Nov 28 16:52:20 crc kubenswrapper[4884]: I1128 16:52:20.934024 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7cc3-account-create-glf7r" event={"ID":"814ec177-1088-4a0f-bda9-ee71c9cfa6eb","Type":"ContainerDied","Data":"70e1168ea111bc97ffeb74f058eb86f3defc92c7b998be4b97f2a0a4a1243a28"} Nov 28 16:52:20 crc kubenswrapper[4884]: I1128 16:52:20.934140 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7cc3-account-create-glf7r" event={"ID":"814ec177-1088-4a0f-bda9-ee71c9cfa6eb","Type":"ContainerStarted","Data":"24dcba159a2090aeea7f64bca19ad878c1e513834b233070520074d8d79a11be"} Nov 28 16:52:21 crc kubenswrapper[4884]: I1128 16:52:21.242537 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:52:21 crc kubenswrapper[4884]: I1128 16:52:21.242613 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.286203 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.421009 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdt4g\" (UniqueName: \"kubernetes.io/projected/814ec177-1088-4a0f-bda9-ee71c9cfa6eb-kube-api-access-sdt4g\") pod \"814ec177-1088-4a0f-bda9-ee71c9cfa6eb\" (UID: \"814ec177-1088-4a0f-bda9-ee71c9cfa6eb\") " Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.429804 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/814ec177-1088-4a0f-bda9-ee71c9cfa6eb-kube-api-access-sdt4g" (OuterVolumeSpecName: "kube-api-access-sdt4g") pod "814ec177-1088-4a0f-bda9-ee71c9cfa6eb" (UID: "814ec177-1088-4a0f-bda9-ee71c9cfa6eb"). InnerVolumeSpecName "kube-api-access-sdt4g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.522698 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdt4g\" (UniqueName: \"kubernetes.io/projected/814ec177-1088-4a0f-bda9-ee71c9cfa6eb-kube-api-access-sdt4g\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.951189 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-7cc3-account-create-glf7r" event={"ID":"814ec177-1088-4a0f-bda9-ee71c9cfa6eb","Type":"ContainerDied","Data":"24dcba159a2090aeea7f64bca19ad878c1e513834b233070520074d8d79a11be"} Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.951228 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24dcba159a2090aeea7f64bca19ad878c1e513834b233070520074d8d79a11be" Nov 28 16:52:22 crc kubenswrapper[4884]: I1128 16:52:22.951242 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-7cc3-account-create-glf7r" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.511170 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-gkc4s"] Nov 28 16:52:24 crc kubenswrapper[4884]: E1128 16:52:24.511522 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="814ec177-1088-4a0f-bda9-ee71c9cfa6eb" containerName="mariadb-account-create" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.511536 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="814ec177-1088-4a0f-bda9-ee71c9cfa6eb" containerName="mariadb-account-create" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.511708 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="814ec177-1088-4a0f-bda9-ee71c9cfa6eb" containerName="mariadb-account-create" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.512273 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.515542 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bcmr9" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.516044 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.534985 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-gkc4s"] Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.667063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-combined-ca-bundle\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.667148 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8p2k\" (UniqueName: \"kubernetes.io/projected/ac7b1d50-de55-4c74-a567-e14c00170191-kube-api-access-f8p2k\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.667224 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-db-sync-config-data\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.768860 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-combined-ca-bundle\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.768911 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8p2k\" (UniqueName: \"kubernetes.io/projected/ac7b1d50-de55-4c74-a567-e14c00170191-kube-api-access-f8p2k\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.768997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-db-sync-config-data\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.774048 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-combined-ca-bundle\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.776587 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-db-sync-config-data\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.787599 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8p2k\" (UniqueName: \"kubernetes.io/projected/ac7b1d50-de55-4c74-a567-e14c00170191-kube-api-access-f8p2k\") pod \"barbican-db-sync-gkc4s\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:24 crc kubenswrapper[4884]: I1128 16:52:24.842228 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:25 crc kubenswrapper[4884]: I1128 16:52:25.322690 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-gkc4s"] Nov 28 16:52:25 crc kubenswrapper[4884]: I1128 16:52:25.973192 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gkc4s" event={"ID":"ac7b1d50-de55-4c74-a567-e14c00170191","Type":"ContainerStarted","Data":"11cd64f702099bf1a995714987d4b861e5b308b2275edf37d9b80f933fb4233c"} Nov 28 16:52:25 crc kubenswrapper[4884]: I1128 16:52:25.973548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gkc4s" event={"ID":"ac7b1d50-de55-4c74-a567-e14c00170191","Type":"ContainerStarted","Data":"a6605251f08af1446e298a6e054a7df376d2dacb84721fddad156960143d4296"} Nov 28 16:52:25 crc kubenswrapper[4884]: I1128 16:52:25.998650 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-gkc4s" podStartSLOduration=1.9986293499999999 podStartE2EDuration="1.99862935s" podCreationTimestamp="2025-11-28 16:52:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:52:25.989030745 +0000 UTC m=+5585.551814546" watchObservedRunningTime="2025-11-28 16:52:25.99862935 +0000 UTC m=+5585.561413151" Nov 28 16:52:26 crc kubenswrapper[4884]: I1128 16:52:26.984438 4884 generic.go:334] "Generic (PLEG): container finished" podID="ac7b1d50-de55-4c74-a567-e14c00170191" containerID="11cd64f702099bf1a995714987d4b861e5b308b2275edf37d9b80f933fb4233c" exitCode=0 Nov 28 16:52:26 crc kubenswrapper[4884]: I1128 16:52:26.984817 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gkc4s" event={"ID":"ac7b1d50-de55-4c74-a567-e14c00170191","Type":"ContainerDied","Data":"11cd64f702099bf1a995714987d4b861e5b308b2275edf37d9b80f933fb4233c"} Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.290303 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.430441 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8p2k\" (UniqueName: \"kubernetes.io/projected/ac7b1d50-de55-4c74-a567-e14c00170191-kube-api-access-f8p2k\") pod \"ac7b1d50-de55-4c74-a567-e14c00170191\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.430577 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-db-sync-config-data\") pod \"ac7b1d50-de55-4c74-a567-e14c00170191\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.430802 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-combined-ca-bundle\") pod \"ac7b1d50-de55-4c74-a567-e14c00170191\" (UID: \"ac7b1d50-de55-4c74-a567-e14c00170191\") " Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.436208 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ac7b1d50-de55-4c74-a567-e14c00170191" (UID: "ac7b1d50-de55-4c74-a567-e14c00170191"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.436227 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac7b1d50-de55-4c74-a567-e14c00170191-kube-api-access-f8p2k" (OuterVolumeSpecName: "kube-api-access-f8p2k") pod "ac7b1d50-de55-4c74-a567-e14c00170191" (UID: "ac7b1d50-de55-4c74-a567-e14c00170191"). InnerVolumeSpecName "kube-api-access-f8p2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.461798 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac7b1d50-de55-4c74-a567-e14c00170191" (UID: "ac7b1d50-de55-4c74-a567-e14c00170191"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.532196 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.532225 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac7b1d50-de55-4c74-a567-e14c00170191-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:28 crc kubenswrapper[4884]: I1128 16:52:28.532233 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8p2k\" (UniqueName: \"kubernetes.io/projected/ac7b1d50-de55-4c74-a567-e14c00170191-kube-api-access-f8p2k\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.048009 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-gkc4s" event={"ID":"ac7b1d50-de55-4c74-a567-e14c00170191","Type":"ContainerDied","Data":"a6605251f08af1446e298a6e054a7df376d2dacb84721fddad156960143d4296"} Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.048049 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6605251f08af1446e298a6e054a7df376d2dacb84721fddad156960143d4296" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.069254 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-gkc4s" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.251325 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-59676fff-jblk7"] Nov 28 16:52:29 crc kubenswrapper[4884]: E1128 16:52:29.251734 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac7b1d50-de55-4c74-a567-e14c00170191" containerName="barbican-db-sync" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.251749 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac7b1d50-de55-4c74-a567-e14c00170191" containerName="barbican-db-sync" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.251908 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac7b1d50-de55-4c74-a567-e14c00170191" containerName="barbican-db-sync" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.252825 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.257561 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.257860 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bcmr9" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.258249 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.266305 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7bbd9868f8-pwsck"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.267753 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.274483 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.276771 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7bbd9868f8-pwsck"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.297892 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-59676fff-jblk7"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.392473 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5597774b69-l64p5"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.394534 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.402439 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5597774b69-l64p5"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.448927 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbww7\" (UniqueName: \"kubernetes.io/projected/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-kube-api-access-bbww7\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-combined-ca-bundle\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449059 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-logs\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449112 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-config-data-custom\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449139 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-config-data\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449194 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-config-data\") pod 
\"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449280 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-config-data-custom\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449315 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzxbf\" (UniqueName: \"kubernetes.io/projected/d7e17112-8210-4348-a0c8-16845aaf9633-kube-api-access-xzxbf\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449340 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7e17112-8210-4348-a0c8-16845aaf9633-logs\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.449363 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-combined-ca-bundle\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.514408 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-77fc564658-4lcch"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.516154 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.523426 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.536423 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77fc564658-4lcch"] Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554454 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-sb\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554562 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-config-data-custom\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554595 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzxbf\" (UniqueName: \"kubernetes.io/projected/d7e17112-8210-4348-a0c8-16845aaf9633-kube-api-access-xzxbf\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554615 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7e17112-8210-4348-a0c8-16845aaf9633-logs\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-combined-ca-bundle\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554665 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jps2l\" (UniqueName: \"kubernetes.io/projected/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-kube-api-access-jps2l\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554711 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbww7\" (UniqueName: \"kubernetes.io/projected/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-kube-api-access-bbww7\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554736 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-config-data\") pod \"barbican-api-77fc564658-4lcch\" (UID: 
\"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554762 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-combined-ca-bundle\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554788 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-combined-ca-bundle\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554832 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-config-data-custom\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554856 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-logs\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554892 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-config-data-custom\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554913 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-config-data\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.554946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-dns-svc\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.555003 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68374775-daef-4964-ad6c-7c9411287fdd-logs\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.555044 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkrjq\" (UniqueName: 
\"kubernetes.io/projected/68374775-daef-4964-ad6c-7c9411287fdd-kube-api-access-dkrjq\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.555080 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-config-data\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.555157 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-config\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.555179 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-nb\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.558529 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-logs\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.562047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-config-data-custom\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.562697 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7e17112-8210-4348-a0c8-16845aaf9633-logs\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.568838 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-combined-ca-bundle\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.569042 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7e17112-8210-4348-a0c8-16845aaf9633-config-data\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.571518 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-combined-ca-bundle\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.572381 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-config-data-custom\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.578935 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-config-data\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.582633 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbww7\" (UniqueName: \"kubernetes.io/projected/6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1-kube-api-access-bbww7\") pod \"barbican-keystone-listener-7bbd9868f8-pwsck\" (UID: \"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1\") " pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.585972 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzxbf\" (UniqueName: \"kubernetes.io/projected/d7e17112-8210-4348-a0c8-16845aaf9633-kube-api-access-xzxbf\") pod \"barbican-worker-59676fff-jblk7\" (UID: \"d7e17112-8210-4348-a0c8-16845aaf9633\") " pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.606113 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-config-data\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657227 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-combined-ca-bundle\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-config-data-custom\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657295 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-dns-svc\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657322 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68374775-daef-4964-ad6c-7c9411287fdd-logs\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657347 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkrjq\" (UniqueName: \"kubernetes.io/projected/68374775-daef-4964-ad6c-7c9411287fdd-kube-api-access-dkrjq\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657364 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-config\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657384 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-nb\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657425 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-sb\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " 
pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.657467 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jps2l\" (UniqueName: \"kubernetes.io/projected/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-kube-api-access-jps2l\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.663083 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-config-data\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.666574 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/68374775-daef-4964-ad6c-7c9411287fdd-logs\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.672249 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-config\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.672909 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-sb\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.673051 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-nb\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.674951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-combined-ca-bundle\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.675467 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-dns-svc\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.679004 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/68374775-daef-4964-ad6c-7c9411287fdd-config-data-custom\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.680076 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jps2l\" (UniqueName: \"kubernetes.io/projected/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-kube-api-access-jps2l\") pod \"dnsmasq-dns-5597774b69-l64p5\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.683805 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkrjq\" (UniqueName: \"kubernetes.io/projected/68374775-daef-4964-ad6c-7c9411287fdd-kube-api-access-dkrjq\") pod \"barbican-api-77fc564658-4lcch\" (UID: \"68374775-daef-4964-ad6c-7c9411287fdd\") " pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.711733 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.836493 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:29 crc kubenswrapper[4884]: I1128 16:52:29.876274 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-59676fff-jblk7" Nov 28 16:52:30 crc kubenswrapper[4884]: I1128 16:52:30.203155 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7bbd9868f8-pwsck"] Nov 28 16:52:30 crc kubenswrapper[4884]: W1128 16:52:30.214389 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ffb554a_f0b6_4723_aa14_ce6b4f46bbf1.slice/crio-f3c7aaed55893b515d1f2af7a07999be732fa5dd332c6ae35ee16187cabc6df4 WatchSource:0}: Error finding container f3c7aaed55893b515d1f2af7a07999be732fa5dd332c6ae35ee16187cabc6df4: Status 404 returned error can't find the container with id f3c7aaed55893b515d1f2af7a07999be732fa5dd332c6ae35ee16187cabc6df4 Nov 28 16:52:30 crc kubenswrapper[4884]: I1128 16:52:30.275011 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5597774b69-l64p5"] Nov 28 16:52:30 crc kubenswrapper[4884]: I1128 16:52:30.378830 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77fc564658-4lcch"] Nov 28 16:52:30 crc kubenswrapper[4884]: I1128 16:52:30.464910 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-59676fff-jblk7"] Nov 28 16:52:30 crc kubenswrapper[4884]: W1128 16:52:30.468251 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7e17112_8210_4348_a0c8_16845aaf9633.slice/crio-571102d5ed5ab2b3ac8536305aa27bc3084534f7bbeba2c6a59f917509bdaba3 WatchSource:0}: Error finding container 571102d5ed5ab2b3ac8536305aa27bc3084534f7bbeba2c6a59f917509bdaba3: Status 404 returned error can't find the container with id 571102d5ed5ab2b3ac8536305aa27bc3084534f7bbeba2c6a59f917509bdaba3 Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.075040 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59676fff-jblk7" event={"ID":"d7e17112-8210-4348-a0c8-16845aaf9633","Type":"ContainerStarted","Data":"571102d5ed5ab2b3ac8536305aa27bc3084534f7bbeba2c6a59f917509bdaba3"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.078145 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" 
event={"ID":"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1","Type":"ContainerStarted","Data":"bca514098339da5b85f2960db0549e726cc102f57db9366c56f1880db184f1d2"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.078273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" event={"ID":"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1","Type":"ContainerStarted","Data":"6709496252a3269ca081849f8dffe432daf506b5453056f7b0f74862d8b747c9"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.078351 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" event={"ID":"6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1","Type":"ContainerStarted","Data":"f3c7aaed55893b515d1f2af7a07999be732fa5dd332c6ae35ee16187cabc6df4"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.080856 4884 generic.go:334] "Generic (PLEG): container finished" podID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerID="e23ee8996afebdb91d6f7df1d8dc7b63ecb700858568750f9bcb14c28f9bdd61" exitCode=0 Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.081150 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5597774b69-l64p5" event={"ID":"7ad3690c-1e05-4bc8-bdf5-515df004cdbc","Type":"ContainerDied","Data":"e23ee8996afebdb91d6f7df1d8dc7b63ecb700858568750f9bcb14c28f9bdd61"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.081874 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5597774b69-l64p5" event={"ID":"7ad3690c-1e05-4bc8-bdf5-515df004cdbc","Type":"ContainerStarted","Data":"11678d463b21438d733afc5d0f2901fa4c21a7f9b849d735d53637fb63695da0"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.084201 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77fc564658-4lcch" event={"ID":"68374775-daef-4964-ad6c-7c9411287fdd","Type":"ContainerStarted","Data":"44aaab4a7882b4d8fc1f580e65ec1a7bce8f19cad78f1bfa4f3452a39551edf0"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.084278 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77fc564658-4lcch" event={"ID":"68374775-daef-4964-ad6c-7c9411287fdd","Type":"ContainerStarted","Data":"533eb65144cbbfc79112e56e0c569d755a7f5fb76d7d3e71e8315476ad952edb"} Nov 28 16:52:31 crc kubenswrapper[4884]: I1128 16:52:31.098838 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7bbd9868f8-pwsck" podStartSLOduration=2.098819177 podStartE2EDuration="2.098819177s" podCreationTimestamp="2025-11-28 16:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:52:31.096983913 +0000 UTC m=+5590.659767724" watchObservedRunningTime="2025-11-28 16:52:31.098819177 +0000 UTC m=+5590.661602988" Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.098226 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59676fff-jblk7" event={"ID":"d7e17112-8210-4348-a0c8-16845aaf9633","Type":"ContainerStarted","Data":"77ed373f49ca2f88ec043790f8713b74ed69b05864e057b9bbc052be85b3e146"} Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.098592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-59676fff-jblk7" event={"ID":"d7e17112-8210-4348-a0c8-16845aaf9633","Type":"ContainerStarted","Data":"c82b87ea93fd6ab9d2c8dff9323067887fe3bbf768040cfeadfef131fb75210e"} Nov 28 
16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.109341 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5597774b69-l64p5" event={"ID":"7ad3690c-1e05-4bc8-bdf5-515df004cdbc","Type":"ContainerStarted","Data":"b37da1f851af2084792cdfac45205855c64720046d2c0cb3a77f7b4a1c6100d5"} Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.109451 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.118307 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-59676fff-jblk7" podStartSLOduration=3.118285915 podStartE2EDuration="3.118285915s" podCreationTimestamp="2025-11-28 16:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:52:32.116585454 +0000 UTC m=+5591.679369265" watchObservedRunningTime="2025-11-28 16:52:32.118285915 +0000 UTC m=+5591.681069726" Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.118531 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77fc564658-4lcch" event={"ID":"68374775-daef-4964-ad6c-7c9411287fdd","Type":"ContainerStarted","Data":"16f322c7c06b753055ce087cd51160a75b88bfd5c8a27bdcabb4e3228c47f23b"} Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.118597 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.118624 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.177350 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5597774b69-l64p5" podStartSLOduration=3.17732291 podStartE2EDuration="3.17732291s" podCreationTimestamp="2025-11-28 16:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:52:32.151260422 +0000 UTC m=+5591.714044233" watchObservedRunningTime="2025-11-28 16:52:32.17732291 +0000 UTC m=+5591.740106721" Nov 28 16:52:32 crc kubenswrapper[4884]: I1128 16:52:32.179815 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-77fc564658-4lcch" podStartSLOduration=3.17979241 podStartE2EDuration="3.17979241s" podCreationTimestamp="2025-11-28 16:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:52:32.170677888 +0000 UTC m=+5591.733461699" watchObservedRunningTime="2025-11-28 16:52:32.17979241 +0000 UTC m=+5591.742576211" Nov 28 16:52:39 crc kubenswrapper[4884]: I1128 16:52:39.714302 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:52:39 crc kubenswrapper[4884]: I1128 16:52:39.806689 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bd5644bf7-k7wjn"] Nov 28 16:52:39 crc kubenswrapper[4884]: I1128 16:52:39.806923 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerName="dnsmasq-dns" containerID="cri-o://e161aa2acb46e092ccf896af782644661e79c90b937e1fd680da7162138ebdd2" 
gracePeriod=10 Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.181775 4884 generic.go:334] "Generic (PLEG): container finished" podID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerID="e161aa2acb46e092ccf896af782644661e79c90b937e1fd680da7162138ebdd2" exitCode=0 Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.181813 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" event={"ID":"846c0625-e92e-4fce-99c3-ad5934ac0c60","Type":"ContainerDied","Data":"e161aa2acb46e092ccf896af782644661e79c90b937e1fd680da7162138ebdd2"} Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.755114 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.878673 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-nb\") pod \"846c0625-e92e-4fce-99c3-ad5934ac0c60\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.878832 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8dmv\" (UniqueName: \"kubernetes.io/projected/846c0625-e92e-4fce-99c3-ad5934ac0c60-kube-api-access-r8dmv\") pod \"846c0625-e92e-4fce-99c3-ad5934ac0c60\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.878889 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-config\") pod \"846c0625-e92e-4fce-99c3-ad5934ac0c60\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.878926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-dns-svc\") pod \"846c0625-e92e-4fce-99c3-ad5934ac0c60\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.878988 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-sb\") pod \"846c0625-e92e-4fce-99c3-ad5934ac0c60\" (UID: \"846c0625-e92e-4fce-99c3-ad5934ac0c60\") " Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.893441 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/846c0625-e92e-4fce-99c3-ad5934ac0c60-kube-api-access-r8dmv" (OuterVolumeSpecName: "kube-api-access-r8dmv") pod "846c0625-e92e-4fce-99c3-ad5934ac0c60" (UID: "846c0625-e92e-4fce-99c3-ad5934ac0c60"). InnerVolumeSpecName "kube-api-access-r8dmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.925475 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "846c0625-e92e-4fce-99c3-ad5934ac0c60" (UID: "846c0625-e92e-4fce-99c3-ad5934ac0c60"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.928376 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "846c0625-e92e-4fce-99c3-ad5934ac0c60" (UID: "846c0625-e92e-4fce-99c3-ad5934ac0c60"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.928713 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "846c0625-e92e-4fce-99c3-ad5934ac0c60" (UID: "846c0625-e92e-4fce-99c3-ad5934ac0c60"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.937629 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-config" (OuterVolumeSpecName: "config") pod "846c0625-e92e-4fce-99c3-ad5934ac0c60" (UID: "846c0625-e92e-4fce-99c3-ad5934ac0c60"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.980613 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.980650 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.980659 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.980670 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/846c0625-e92e-4fce-99c3-ad5934ac0c60-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:40 crc kubenswrapper[4884]: I1128 16:52:40.980679 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8dmv\" (UniqueName: \"kubernetes.io/projected/846c0625-e92e-4fce-99c3-ad5934ac0c60-kube-api-access-r8dmv\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.192950 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" event={"ID":"846c0625-e92e-4fce-99c3-ad5934ac0c60","Type":"ContainerDied","Data":"057aa9c76a3fca2daa1fcd0be6416e6407d9e278d6bc8d3930863a262c9919ee"} Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.193023 4884 scope.go:117] "RemoveContainer" containerID="e161aa2acb46e092ccf896af782644661e79c90b937e1fd680da7162138ebdd2" Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.193036 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bd5644bf7-k7wjn" Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.212778 4884 scope.go:117] "RemoveContainer" containerID="7d05fd7524e52b3931bcc01ee5fc100e3c595b1dbf02ff0a784d21d2270c94dd" Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.232252 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bd5644bf7-k7wjn"] Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.238859 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bd5644bf7-k7wjn"] Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.546501 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:41 crc kubenswrapper[4884]: I1128 16:52:41.574650 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77fc564658-4lcch" Nov 28 16:52:42 crc kubenswrapper[4884]: I1128 16:52:42.699863 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" path="/var/lib/kubelet/pods/846c0625-e92e-4fce-99c3-ad5934ac0c60/volumes" Nov 28 16:52:51 crc kubenswrapper[4884]: I1128 16:52:51.244066 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:52:51 crc kubenswrapper[4884]: I1128 16:52:51.244893 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:52:51 crc kubenswrapper[4884]: I1128 16:52:51.244979 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 16:52:51 crc kubenswrapper[4884]: I1128 16:52:51.245971 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"48de67cda51bf73805c75e12b81816f10effd891069b56fd2588291f4c432eee"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:52:51 crc kubenswrapper[4884]: I1128 16:52:51.246069 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://48de67cda51bf73805c75e12b81816f10effd891069b56fd2588291f4c432eee" gracePeriod=600 Nov 28 16:52:52 crc kubenswrapper[4884]: I1128 16:52:52.300378 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="48de67cda51bf73805c75e12b81816f10effd891069b56fd2588291f4c432eee" exitCode=0 Nov 28 16:52:52 crc kubenswrapper[4884]: I1128 16:52:52.301226 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"48de67cda51bf73805c75e12b81816f10effd891069b56fd2588291f4c432eee"} Nov 28 16:52:52 crc kubenswrapper[4884]: I1128 16:52:52.301264 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"} Nov 28 16:52:52 crc kubenswrapper[4884]: I1128 16:52:52.301283 4884 scope.go:117] "RemoveContainer" containerID="6d0e1c9b6c5bd2f9695574dd9045917d694637adf164e0fa8ac03610b115ee41" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.120959 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-fxdpb"] Nov 28 16:52:53 crc kubenswrapper[4884]: E1128 16:52:53.121699 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerName="init" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.121729 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerName="init" Nov 28 16:52:53 crc kubenswrapper[4884]: E1128 16:52:53.121757 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerName="dnsmasq-dns" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.121768 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerName="dnsmasq-dns" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.122007 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="846c0625-e92e-4fce-99c3-ad5934ac0c60" containerName="dnsmasq-dns" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.122759 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fxdpb" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.128933 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fxdpb"] Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.236804 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dprbc\" (UniqueName: \"kubernetes.io/projected/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408-kube-api-access-dprbc\") pod \"neutron-db-create-fxdpb\" (UID: \"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408\") " pod="openstack/neutron-db-create-fxdpb" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.339316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dprbc\" (UniqueName: \"kubernetes.io/projected/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408-kube-api-access-dprbc\") pod \"neutron-db-create-fxdpb\" (UID: \"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408\") " pod="openstack/neutron-db-create-fxdpb" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.367770 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dprbc\" (UniqueName: \"kubernetes.io/projected/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408-kube-api-access-dprbc\") pod \"neutron-db-create-fxdpb\" (UID: \"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408\") " pod="openstack/neutron-db-create-fxdpb" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.446285 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-fxdpb" Nov 28 16:52:53 crc kubenswrapper[4884]: I1128 16:52:53.954451 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-fxdpb"] Nov 28 16:52:54 crc kubenswrapper[4884]: I1128 16:52:54.322066 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8fe0cc2-1f0e-4660-8ca0-5b34ce822408" containerID="d05acd59d4241883c7d194743b63c4f8b688ebc3a4a6cf30c6896186b9ae949e" exitCode=0 Nov 28 16:52:54 crc kubenswrapper[4884]: I1128 16:52:54.322260 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fxdpb" event={"ID":"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408","Type":"ContainerDied","Data":"d05acd59d4241883c7d194743b63c4f8b688ebc3a4a6cf30c6896186b9ae949e"} Nov 28 16:52:54 crc kubenswrapper[4884]: I1128 16:52:54.322386 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fxdpb" event={"ID":"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408","Type":"ContainerStarted","Data":"c7b044db77e529c870de529be88d977f0f924766f373dbe36462481edaaad7af"} Nov 28 16:52:55 crc kubenswrapper[4884]: I1128 16:52:55.693026 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-fxdpb" Nov 28 16:52:55 crc kubenswrapper[4884]: I1128 16:52:55.779000 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dprbc\" (UniqueName: \"kubernetes.io/projected/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408-kube-api-access-dprbc\") pod \"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408\" (UID: \"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408\") " Nov 28 16:52:55 crc kubenswrapper[4884]: I1128 16:52:55.784600 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408-kube-api-access-dprbc" (OuterVolumeSpecName: "kube-api-access-dprbc") pod "a8fe0cc2-1f0e-4660-8ca0-5b34ce822408" (UID: "a8fe0cc2-1f0e-4660-8ca0-5b34ce822408"). InnerVolumeSpecName "kube-api-access-dprbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:52:55 crc kubenswrapper[4884]: I1128 16:52:55.880978 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dprbc\" (UniqueName: \"kubernetes.io/projected/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408-kube-api-access-dprbc\") on node \"crc\" DevicePath \"\"" Nov 28 16:52:56 crc kubenswrapper[4884]: I1128 16:52:56.342854 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-fxdpb" event={"ID":"a8fe0cc2-1f0e-4660-8ca0-5b34ce822408","Type":"ContainerDied","Data":"c7b044db77e529c870de529be88d977f0f924766f373dbe36462481edaaad7af"} Nov 28 16:52:56 crc kubenswrapper[4884]: I1128 16:52:56.342913 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7b044db77e529c870de529be88d977f0f924766f373dbe36462481edaaad7af" Nov 28 16:52:56 crc kubenswrapper[4884]: I1128 16:52:56.342946 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-fxdpb" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.193435 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6465-account-create-z4qc9"] Nov 28 16:53:03 crc kubenswrapper[4884]: E1128 16:53:03.195366 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8fe0cc2-1f0e-4660-8ca0-5b34ce822408" containerName="mariadb-database-create" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.195382 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8fe0cc2-1f0e-4660-8ca0-5b34ce822408" containerName="mariadb-database-create" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.195525 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8fe0cc2-1f0e-4660-8ca0-5b34ce822408" containerName="mariadb-database-create" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.196135 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.204685 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6465-account-create-z4qc9"] Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.205288 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.312738 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b55hr\" (UniqueName: \"kubernetes.io/projected/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba-kube-api-access-b55hr\") pod \"neutron-6465-account-create-z4qc9\" (UID: \"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba\") " pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.415320 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b55hr\" (UniqueName: \"kubernetes.io/projected/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba-kube-api-access-b55hr\") pod \"neutron-6465-account-create-z4qc9\" (UID: \"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba\") " pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.441382 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b55hr\" (UniqueName: \"kubernetes.io/projected/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba-kube-api-access-b55hr\") pod \"neutron-6465-account-create-z4qc9\" (UID: \"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba\") " pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.516104 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:03 crc kubenswrapper[4884]: I1128 16:53:03.978712 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6465-account-create-z4qc9"] Nov 28 16:53:04 crc kubenswrapper[4884]: I1128 16:53:04.417559 4884 generic.go:334] "Generic (PLEG): container finished" podID="fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba" containerID="03efcc0c1514cc7ee56e1e64bd8fb5e9dd46cc3b6bac98ec090f810fc14ed8fb" exitCode=0 Nov 28 16:53:04 crc kubenswrapper[4884]: I1128 16:53:04.417664 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6465-account-create-z4qc9" event={"ID":"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba","Type":"ContainerDied","Data":"03efcc0c1514cc7ee56e1e64bd8fb5e9dd46cc3b6bac98ec090f810fc14ed8fb"} Nov 28 16:53:04 crc kubenswrapper[4884]: I1128 16:53:04.417941 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6465-account-create-z4qc9" event={"ID":"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba","Type":"ContainerStarted","Data":"361b8236fd65774b776e8649c44ec087844c0df4cae7a579b894d666cb088fb4"} Nov 28 16:53:05 crc kubenswrapper[4884]: I1128 16:53:05.801001 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:05 crc kubenswrapper[4884]: I1128 16:53:05.862681 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b55hr\" (UniqueName: \"kubernetes.io/projected/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba-kube-api-access-b55hr\") pod \"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba\" (UID: \"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba\") " Nov 28 16:53:05 crc kubenswrapper[4884]: I1128 16:53:05.870455 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba-kube-api-access-b55hr" (OuterVolumeSpecName: "kube-api-access-b55hr") pod "fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba" (UID: "fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba"). InnerVolumeSpecName "kube-api-access-b55hr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:53:05 crc kubenswrapper[4884]: I1128 16:53:05.965862 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b55hr\" (UniqueName: \"kubernetes.io/projected/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba-kube-api-access-b55hr\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:06 crc kubenswrapper[4884]: I1128 16:53:06.445277 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6465-account-create-z4qc9" event={"ID":"fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba","Type":"ContainerDied","Data":"361b8236fd65774b776e8649c44ec087844c0df4cae7a579b894d666cb088fb4"} Nov 28 16:53:06 crc kubenswrapper[4884]: I1128 16:53:06.445323 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="361b8236fd65774b776e8649c44ec087844c0df4cae7a579b894d666cb088fb4" Nov 28 16:53:06 crc kubenswrapper[4884]: I1128 16:53:06.445416 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6465-account-create-z4qc9" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.443923 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-rxcmj"] Nov 28 16:53:08 crc kubenswrapper[4884]: E1128 16:53:08.444345 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba" containerName="mariadb-account-create" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.444361 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba" containerName="mariadb-account-create" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.444603 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba" containerName="mariadb-account-create" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.445385 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.447932 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.448406 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-kcq95" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.448883 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.465514 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rxcmj"] Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.511750 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpfs5\" (UniqueName: \"kubernetes.io/projected/395cfd62-9b8e-429a-9b4b-b583eb48d067-kube-api-access-mpfs5\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.511888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-combined-ca-bundle\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.511949 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-config\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.613685 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpfs5\" (UniqueName: \"kubernetes.io/projected/395cfd62-9b8e-429a-9b4b-b583eb48d067-kube-api-access-mpfs5\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.613793 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-combined-ca-bundle\") pod \"neutron-db-sync-rxcmj\" (UID: 
\"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.613838 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-config\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.621290 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-config\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.623162 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-combined-ca-bundle\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.636356 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpfs5\" (UniqueName: \"kubernetes.io/projected/395cfd62-9b8e-429a-9b4b-b583eb48d067-kube-api-access-mpfs5\") pod \"neutron-db-sync-rxcmj\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:08 crc kubenswrapper[4884]: I1128 16:53:08.767436 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:09 crc kubenswrapper[4884]: I1128 16:53:09.208566 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-rxcmj"] Nov 28 16:53:09 crc kubenswrapper[4884]: W1128 16:53:09.209899 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod395cfd62_9b8e_429a_9b4b_b583eb48d067.slice/crio-0477d7dcba2bf8ba002b75a4e0149d4f3b53b43e83eaefad9839f9afe9219ea5 WatchSource:0}: Error finding container 0477d7dcba2bf8ba002b75a4e0149d4f3b53b43e83eaefad9839f9afe9219ea5: Status 404 returned error can't find the container with id 0477d7dcba2bf8ba002b75a4e0149d4f3b53b43e83eaefad9839f9afe9219ea5 Nov 28 16:53:09 crc kubenswrapper[4884]: I1128 16:53:09.473277 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rxcmj" event={"ID":"395cfd62-9b8e-429a-9b4b-b583eb48d067","Type":"ContainerStarted","Data":"267dc7eec10be1d22a822a1f997b12266412531de59a25fffb7cd9cd229267ab"} Nov 28 16:53:09 crc kubenswrapper[4884]: I1128 16:53:09.473583 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rxcmj" event={"ID":"395cfd62-9b8e-429a-9b4b-b583eb48d067","Type":"ContainerStarted","Data":"0477d7dcba2bf8ba002b75a4e0149d4f3b53b43e83eaefad9839f9afe9219ea5"} Nov 28 16:53:09 crc kubenswrapper[4884]: I1128 16:53:09.492986 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-rxcmj" podStartSLOduration=1.4929654079999999 podStartE2EDuration="1.492965408s" podCreationTimestamp="2025-11-28 16:53:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:53:09.489559784 +0000 UTC m=+5629.052343585" watchObservedRunningTime="2025-11-28 
16:53:09.492965408 +0000 UTC m=+5629.055749209" Nov 28 16:53:13 crc kubenswrapper[4884]: I1128 16:53:13.513206 4884 generic.go:334] "Generic (PLEG): container finished" podID="395cfd62-9b8e-429a-9b4b-b583eb48d067" containerID="267dc7eec10be1d22a822a1f997b12266412531de59a25fffb7cd9cd229267ab" exitCode=0 Nov 28 16:53:13 crc kubenswrapper[4884]: I1128 16:53:13.513303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rxcmj" event={"ID":"395cfd62-9b8e-429a-9b4b-b583eb48d067","Type":"ContainerDied","Data":"267dc7eec10be1d22a822a1f997b12266412531de59a25fffb7cd9cd229267ab"} Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.859337 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.920378 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-combined-ca-bundle\") pod \"395cfd62-9b8e-429a-9b4b-b583eb48d067\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.920446 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-config\") pod \"395cfd62-9b8e-429a-9b4b-b583eb48d067\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.920516 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpfs5\" (UniqueName: \"kubernetes.io/projected/395cfd62-9b8e-429a-9b4b-b583eb48d067-kube-api-access-mpfs5\") pod \"395cfd62-9b8e-429a-9b4b-b583eb48d067\" (UID: \"395cfd62-9b8e-429a-9b4b-b583eb48d067\") " Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.926201 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/395cfd62-9b8e-429a-9b4b-b583eb48d067-kube-api-access-mpfs5" (OuterVolumeSpecName: "kube-api-access-mpfs5") pod "395cfd62-9b8e-429a-9b4b-b583eb48d067" (UID: "395cfd62-9b8e-429a-9b4b-b583eb48d067"). InnerVolumeSpecName "kube-api-access-mpfs5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.947967 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "395cfd62-9b8e-429a-9b4b-b583eb48d067" (UID: "395cfd62-9b8e-429a-9b4b-b583eb48d067"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:53:14 crc kubenswrapper[4884]: I1128 16:53:14.949693 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-config" (OuterVolumeSpecName: "config") pod "395cfd62-9b8e-429a-9b4b-b583eb48d067" (UID: "395cfd62-9b8e-429a-9b4b-b583eb48d067"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.022146 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.022179 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/395cfd62-9b8e-429a-9b4b-b583eb48d067-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.022190 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpfs5\" (UniqueName: \"kubernetes.io/projected/395cfd62-9b8e-429a-9b4b-b583eb48d067-kube-api-access-mpfs5\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.530350 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-rxcmj" event={"ID":"395cfd62-9b8e-429a-9b4b-b583eb48d067","Type":"ContainerDied","Data":"0477d7dcba2bf8ba002b75a4e0149d4f3b53b43e83eaefad9839f9afe9219ea5"} Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.530414 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0477d7dcba2bf8ba002b75a4e0149d4f3b53b43e83eaefad9839f9afe9219ea5" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.530417 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-rxcmj" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.761105 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bdbb6684f-j2ct2"] Nov 28 16:53:15 crc kubenswrapper[4884]: E1128 16:53:15.761540 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="395cfd62-9b8e-429a-9b4b-b583eb48d067" containerName="neutron-db-sync" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.761561 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="395cfd62-9b8e-429a-9b4b-b583eb48d067" containerName="neutron-db-sync" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.761809 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="395cfd62-9b8e-429a-9b4b-b583eb48d067" containerName="neutron-db-sync" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.763421 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.783901 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bdbb6684f-j2ct2"] Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.834030 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-nb\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.834166 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-dns-svc\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.834295 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vjpz\" (UniqueName: \"kubernetes.io/projected/ff557219-e683-4670-a0b1-d2d1ee818a97-kube-api-access-2vjpz\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.834323 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-config\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.834386 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-sb\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.933530 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-79566747c-k6tf8"] Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.942535 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vjpz\" (UniqueName: \"kubernetes.io/projected/ff557219-e683-4670-a0b1-d2d1ee818a97-kube-api-access-2vjpz\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.942948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-config\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.943197 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-sb\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " 
pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.943272 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-nb\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.943501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-dns-svc\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.947714 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-dns-svc\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.948086 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.957504 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-sb\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.959211 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-config\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.975161 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.977505 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-kcq95" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.978762 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-nb\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.979207 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.980325 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vjpz\" (UniqueName: \"kubernetes.io/projected/ff557219-e683-4670-a0b1-d2d1ee818a97-kube-api-access-2vjpz\") pod \"dnsmasq-dns-5bdbb6684f-j2ct2\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:15 crc kubenswrapper[4884]: I1128 16:53:15.988727 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-79566747c-k6tf8"] Nov 28 16:53:16 
crc kubenswrapper[4884]: I1128 16:53:16.054377 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-httpd-config\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.054465 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-config\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.054640 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvw4z\" (UniqueName: \"kubernetes.io/projected/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-kube-api-access-tvw4z\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.054741 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-combined-ca-bundle\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.111776 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.156679 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-combined-ca-bundle\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.156998 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-httpd-config\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.157064 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-config\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.157160 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvw4z\" (UniqueName: \"kubernetes.io/projected/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-kube-api-access-tvw4z\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.161795 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-combined-ca-bundle\") pod \"neutron-79566747c-k6tf8\" (UID: 
\"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.162047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-config\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.162377 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-httpd-config\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.175822 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvw4z\" (UniqueName: \"kubernetes.io/projected/3aa02ca0-aa88-45f0-81cc-38c0b6204d1b-kube-api-access-tvw4z\") pod \"neutron-79566747c-k6tf8\" (UID: \"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b\") " pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.330485 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.588286 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bdbb6684f-j2ct2"] Nov 28 16:53:16 crc kubenswrapper[4884]: I1128 16:53:16.933630 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-79566747c-k6tf8"] Nov 28 16:53:16 crc kubenswrapper[4884]: W1128 16:53:16.942497 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3aa02ca0_aa88_45f0_81cc_38c0b6204d1b.slice/crio-939a4f31941ad7338786085dec75d91c5a1a35d382519738e62df9470d814d0a WatchSource:0}: Error finding container 939a4f31941ad7338786085dec75d91c5a1a35d382519738e62df9470d814d0a: Status 404 returned error can't find the container with id 939a4f31941ad7338786085dec75d91c5a1a35d382519738e62df9470d814d0a Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.547437 4884 generic.go:334] "Generic (PLEG): container finished" podID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerID="db4fb494ba312c7d21398c3bd60308601da64ef3ef7e97e615dbdcb4f819b96c" exitCode=0 Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.547617 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" event={"ID":"ff557219-e683-4670-a0b1-d2d1ee818a97","Type":"ContainerDied","Data":"db4fb494ba312c7d21398c3bd60308601da64ef3ef7e97e615dbdcb4f819b96c"} Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.547789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" event={"ID":"ff557219-e683-4670-a0b1-d2d1ee818a97","Type":"ContainerStarted","Data":"a3b19121a24ab849a24e3fed77abf86c50eec70355d923acb33f4809c857809c"} Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.556209 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79566747c-k6tf8" event={"ID":"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b","Type":"ContainerStarted","Data":"591883580f4c734e93a9707368adfc8eddb2b72548c26a8b31c635ccb8acb29f"} Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.556256 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-79566747c-k6tf8" event={"ID":"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b","Type":"ContainerStarted","Data":"68870a34663b3d583140bfafd1890146ca535248deb114653331916a1d1aef7e"} Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.556273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79566747c-k6tf8" event={"ID":"3aa02ca0-aa88-45f0-81cc-38c0b6204d1b","Type":"ContainerStarted","Data":"939a4f31941ad7338786085dec75d91c5a1a35d382519738e62df9470d814d0a"} Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.556542 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:17 crc kubenswrapper[4884]: I1128 16:53:17.597757 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-79566747c-k6tf8" podStartSLOduration=2.59773929 podStartE2EDuration="2.59773929s" podCreationTimestamp="2025-11-28 16:53:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:53:17.587393607 +0000 UTC m=+5637.150177418" watchObservedRunningTime="2025-11-28 16:53:17.59773929 +0000 UTC m=+5637.160523091" Nov 28 16:53:18 crc kubenswrapper[4884]: I1128 16:53:18.572004 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" event={"ID":"ff557219-e683-4670-a0b1-d2d1ee818a97","Type":"ContainerStarted","Data":"361015a8eda54c7dad56a72de8e43885c3ae50ae354e18f2d740bbbc8a0639e3"} Nov 28 16:53:18 crc kubenswrapper[4884]: I1128 16:53:18.594601 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" podStartSLOduration=3.594579474 podStartE2EDuration="3.594579474s" podCreationTimestamp="2025-11-28 16:53:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:53:18.587924832 +0000 UTC m=+5638.150708643" watchObservedRunningTime="2025-11-28 16:53:18.594579474 +0000 UTC m=+5638.157363285" Nov 28 16:53:19 crc kubenswrapper[4884]: I1128 16:53:19.578855 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:27 crc kubenswrapper[4884]: I1128 16:53:26.114693 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:53:27 crc kubenswrapper[4884]: I1128 16:53:27.950065 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5597774b69-l64p5"] Nov 28 16:53:27 crc kubenswrapper[4884]: I1128 16:53:27.970427 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5597774b69-l64p5" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerName="dnsmasq-dns" containerID="cri-o://b37da1f851af2084792cdfac45205855c64720046d2c0cb3a77f7b4a1c6100d5" gracePeriod=10 Nov 28 16:53:28 crc kubenswrapper[4884]: I1128 16:53:28.794567 4884 generic.go:334] "Generic (PLEG): container finished" podID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerID="b37da1f851af2084792cdfac45205855c64720046d2c0cb3a77f7b4a1c6100d5" exitCode=0 Nov 28 16:53:28 crc kubenswrapper[4884]: I1128 16:53:28.794742 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5597774b69-l64p5" 
event={"ID":"7ad3690c-1e05-4bc8-bdf5-515df004cdbc","Type":"ContainerDied","Data":"b37da1f851af2084792cdfac45205855c64720046d2c0cb3a77f7b4a1c6100d5"} Nov 28 16:53:28 crc kubenswrapper[4884]: I1128 16:53:28.938976 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.097669 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-nb\") pod \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.097819 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-config\") pod \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.097942 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jps2l\" (UniqueName: \"kubernetes.io/projected/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-kube-api-access-jps2l\") pod \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.097994 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-dns-svc\") pod \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.098193 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-sb\") pod \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\" (UID: \"7ad3690c-1e05-4bc8-bdf5-515df004cdbc\") " Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.108324 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-kube-api-access-jps2l" (OuterVolumeSpecName: "kube-api-access-jps2l") pod "7ad3690c-1e05-4bc8-bdf5-515df004cdbc" (UID: "7ad3690c-1e05-4bc8-bdf5-515df004cdbc"). InnerVolumeSpecName "kube-api-access-jps2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.139977 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7ad3690c-1e05-4bc8-bdf5-515df004cdbc" (UID: "7ad3690c-1e05-4bc8-bdf5-515df004cdbc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.142536 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7ad3690c-1e05-4bc8-bdf5-515df004cdbc" (UID: "7ad3690c-1e05-4bc8-bdf5-515df004cdbc"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.145251 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7ad3690c-1e05-4bc8-bdf5-515df004cdbc" (UID: "7ad3690c-1e05-4bc8-bdf5-515df004cdbc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.172370 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-config" (OuterVolumeSpecName: "config") pod "7ad3690c-1e05-4bc8-bdf5-515df004cdbc" (UID: "7ad3690c-1e05-4bc8-bdf5-515df004cdbc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.200282 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jps2l\" (UniqueName: \"kubernetes.io/projected/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-kube-api-access-jps2l\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.200322 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.200335 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.200345 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.200355 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ad3690c-1e05-4bc8-bdf5-515df004cdbc-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.814531 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5597774b69-l64p5" event={"ID":"7ad3690c-1e05-4bc8-bdf5-515df004cdbc","Type":"ContainerDied","Data":"11678d463b21438d733afc5d0f2901fa4c21a7f9b849d735d53637fb63695da0"} Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.814595 4884 scope.go:117] "RemoveContainer" containerID="b37da1f851af2084792cdfac45205855c64720046d2c0cb3a77f7b4a1c6100d5" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.814602 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5597774b69-l64p5" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.842631 4884 scope.go:117] "RemoveContainer" containerID="e23ee8996afebdb91d6f7df1d8dc7b63ecb700858568750f9bcb14c28f9bdd61" Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.848424 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5597774b69-l64p5"] Nov 28 16:53:29 crc kubenswrapper[4884]: I1128 16:53:29.855130 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5597774b69-l64p5"] Nov 28 16:53:30 crc kubenswrapper[4884]: I1128 16:53:30.697608 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" path="/var/lib/kubelet/pods/7ad3690c-1e05-4bc8-bdf5-515df004cdbc/volumes" Nov 28 16:53:46 crc kubenswrapper[4884]: I1128 16:53:46.342949 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-79566747c-k6tf8" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.316239 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-df5hv"] Nov 28 16:53:53 crc kubenswrapper[4884]: E1128 16:53:53.317287 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerName="init" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.317305 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerName="init" Nov 28 16:53:53 crc kubenswrapper[4884]: E1128 16:53:53.317323 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerName="dnsmasq-dns" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.317332 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerName="dnsmasq-dns" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.317557 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ad3690c-1e05-4bc8-bdf5-515df004cdbc" containerName="dnsmasq-dns" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.318558 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-df5hv" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.325501 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-df5hv"] Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.427353 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmqv8\" (UniqueName: \"kubernetes.io/projected/6992d312-2342-4862-8838-7158270733a3-kube-api-access-qmqv8\") pod \"glance-db-create-df5hv\" (UID: \"6992d312-2342-4862-8838-7158270733a3\") " pod="openstack/glance-db-create-df5hv" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.529110 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmqv8\" (UniqueName: \"kubernetes.io/projected/6992d312-2342-4862-8838-7158270733a3-kube-api-access-qmqv8\") pod \"glance-db-create-df5hv\" (UID: \"6992d312-2342-4862-8838-7158270733a3\") " pod="openstack/glance-db-create-df5hv" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.554136 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmqv8\" (UniqueName: \"kubernetes.io/projected/6992d312-2342-4862-8838-7158270733a3-kube-api-access-qmqv8\") pod \"glance-db-create-df5hv\" (UID: \"6992d312-2342-4862-8838-7158270733a3\") " pod="openstack/glance-db-create-df5hv" Nov 28 16:53:53 crc kubenswrapper[4884]: I1128 16:53:53.646843 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-df5hv" Nov 28 16:53:54 crc kubenswrapper[4884]: I1128 16:53:54.102415 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-df5hv"] Nov 28 16:53:54 crc kubenswrapper[4884]: W1128 16:53:54.107613 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6992d312_2342_4862_8838_7158270733a3.slice/crio-b63ddaa83f59f8f64b64234fe39848e427432af12518708f525e68a3a2736a82 WatchSource:0}: Error finding container b63ddaa83f59f8f64b64234fe39848e427432af12518708f525e68a3a2736a82: Status 404 returned error can't find the container with id b63ddaa83f59f8f64b64234fe39848e427432af12518708f525e68a3a2736a82 Nov 28 16:53:55 crc kubenswrapper[4884]: I1128 16:53:55.049827 4884 generic.go:334] "Generic (PLEG): container finished" podID="6992d312-2342-4862-8838-7158270733a3" containerID="a6bffeec389d2ea92a7193f096fae0a78b827688a618c99929eefae16530db0c" exitCode=0 Nov 28 16:53:55 crc kubenswrapper[4884]: I1128 16:53:55.049872 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-df5hv" event={"ID":"6992d312-2342-4862-8838-7158270733a3","Type":"ContainerDied","Data":"a6bffeec389d2ea92a7193f096fae0a78b827688a618c99929eefae16530db0c"} Nov 28 16:53:55 crc kubenswrapper[4884]: I1128 16:53:55.051209 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-df5hv" event={"ID":"6992d312-2342-4862-8838-7158270733a3","Type":"ContainerStarted","Data":"b63ddaa83f59f8f64b64234fe39848e427432af12518708f525e68a3a2736a82"} Nov 28 16:53:56 crc kubenswrapper[4884]: I1128 16:53:56.413565 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-df5hv" Nov 28 16:53:56 crc kubenswrapper[4884]: I1128 16:53:56.578215 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmqv8\" (UniqueName: \"kubernetes.io/projected/6992d312-2342-4862-8838-7158270733a3-kube-api-access-qmqv8\") pod \"6992d312-2342-4862-8838-7158270733a3\" (UID: \"6992d312-2342-4862-8838-7158270733a3\") " Nov 28 16:53:56 crc kubenswrapper[4884]: I1128 16:53:56.583222 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6992d312-2342-4862-8838-7158270733a3-kube-api-access-qmqv8" (OuterVolumeSpecName: "kube-api-access-qmqv8") pod "6992d312-2342-4862-8838-7158270733a3" (UID: "6992d312-2342-4862-8838-7158270733a3"). InnerVolumeSpecName "kube-api-access-qmqv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:53:56 crc kubenswrapper[4884]: I1128 16:53:56.680529 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmqv8\" (UniqueName: \"kubernetes.io/projected/6992d312-2342-4862-8838-7158270733a3-kube-api-access-qmqv8\") on node \"crc\" DevicePath \"\"" Nov 28 16:53:57 crc kubenswrapper[4884]: I1128 16:53:57.072393 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-df5hv" event={"ID":"6992d312-2342-4862-8838-7158270733a3","Type":"ContainerDied","Data":"b63ddaa83f59f8f64b64234fe39848e427432af12518708f525e68a3a2736a82"} Nov 28 16:53:57 crc kubenswrapper[4884]: I1128 16:53:57.072897 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b63ddaa83f59f8f64b64234fe39848e427432af12518708f525e68a3a2736a82" Nov 28 16:53:57 crc kubenswrapper[4884]: I1128 16:53:57.072486 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-df5hv" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.411559 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-4b7e-account-create-2j7zm"] Nov 28 16:54:03 crc kubenswrapper[4884]: E1128 16:54:03.412461 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6992d312-2342-4862-8838-7158270733a3" containerName="mariadb-database-create" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.412473 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6992d312-2342-4862-8838-7158270733a3" containerName="mariadb-database-create" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.412646 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6992d312-2342-4862-8838-7158270733a3" containerName="mariadb-database-create" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.413178 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.415929 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.441835 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4b7e-account-create-2j7zm"] Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.605251 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gv4x\" (UniqueName: \"kubernetes.io/projected/becf87c0-e723-40cb-aaae-294092345f12-kube-api-access-6gv4x\") pod \"glance-4b7e-account-create-2j7zm\" (UID: \"becf87c0-e723-40cb-aaae-294092345f12\") " pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.707416 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gv4x\" (UniqueName: \"kubernetes.io/projected/becf87c0-e723-40cb-aaae-294092345f12-kube-api-access-6gv4x\") pod \"glance-4b7e-account-create-2j7zm\" (UID: \"becf87c0-e723-40cb-aaae-294092345f12\") " pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.735629 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gv4x\" (UniqueName: \"kubernetes.io/projected/becf87c0-e723-40cb-aaae-294092345f12-kube-api-access-6gv4x\") pod \"glance-4b7e-account-create-2j7zm\" (UID: \"becf87c0-e723-40cb-aaae-294092345f12\") " pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:03 crc kubenswrapper[4884]: I1128 16:54:03.747617 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:04 crc kubenswrapper[4884]: I1128 16:54:04.189082 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-4b7e-account-create-2j7zm"] Nov 28 16:54:05 crc kubenswrapper[4884]: I1128 16:54:05.169394 4884 generic.go:334] "Generic (PLEG): container finished" podID="becf87c0-e723-40cb-aaae-294092345f12" containerID="1faa3dd20cbb4aae92601a6b8def3e56d961b6effe5c03a69d7229028652f330" exitCode=0 Nov 28 16:54:05 crc kubenswrapper[4884]: I1128 16:54:05.169447 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4b7e-account-create-2j7zm" event={"ID":"becf87c0-e723-40cb-aaae-294092345f12","Type":"ContainerDied","Data":"1faa3dd20cbb4aae92601a6b8def3e56d961b6effe5c03a69d7229028652f330"} Nov 28 16:54:05 crc kubenswrapper[4884]: I1128 16:54:05.169679 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4b7e-account-create-2j7zm" event={"ID":"becf87c0-e723-40cb-aaae-294092345f12","Type":"ContainerStarted","Data":"e1f5097c2a32fcaadeb0f45bedc766b6376a56614fc6d144d235b63e459c48c3"} Nov 28 16:54:06 crc kubenswrapper[4884]: I1128 16:54:06.464717 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:06 crc kubenswrapper[4884]: I1128 16:54:06.567019 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gv4x\" (UniqueName: \"kubernetes.io/projected/becf87c0-e723-40cb-aaae-294092345f12-kube-api-access-6gv4x\") pod \"becf87c0-e723-40cb-aaae-294092345f12\" (UID: \"becf87c0-e723-40cb-aaae-294092345f12\") " Nov 28 16:54:06 crc kubenswrapper[4884]: I1128 16:54:06.573171 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/becf87c0-e723-40cb-aaae-294092345f12-kube-api-access-6gv4x" (OuterVolumeSpecName: "kube-api-access-6gv4x") pod "becf87c0-e723-40cb-aaae-294092345f12" (UID: "becf87c0-e723-40cb-aaae-294092345f12"). InnerVolumeSpecName "kube-api-access-6gv4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:06 crc kubenswrapper[4884]: I1128 16:54:06.669135 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gv4x\" (UniqueName: \"kubernetes.io/projected/becf87c0-e723-40cb-aaae-294092345f12-kube-api-access-6gv4x\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:07 crc kubenswrapper[4884]: I1128 16:54:07.190581 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-4b7e-account-create-2j7zm" event={"ID":"becf87c0-e723-40cb-aaae-294092345f12","Type":"ContainerDied","Data":"e1f5097c2a32fcaadeb0f45bedc766b6376a56614fc6d144d235b63e459c48c3"} Nov 28 16:54:07 crc kubenswrapper[4884]: I1128 16:54:07.190636 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-4b7e-account-create-2j7zm" Nov 28 16:54:07 crc kubenswrapper[4884]: I1128 16:54:07.190649 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1f5097c2a32fcaadeb0f45bedc766b6376a56614fc6d144d235b63e459c48c3" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.472002 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-lxfgf"] Nov 28 16:54:08 crc kubenswrapper[4884]: E1128 16:54:08.472761 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="becf87c0-e723-40cb-aaae-294092345f12" containerName="mariadb-account-create" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.472779 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="becf87c0-e723-40cb-aaae-294092345f12" containerName="mariadb-account-create" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.473003 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="becf87c0-e723-40cb-aaae-294092345f12" containerName="mariadb-account-create" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.473756 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.476029 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-ww7sc" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.476632 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.481716 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lxfgf"] Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.607605 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-combined-ca-bundle\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.607702 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-db-sync-config-data\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.607747 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-config-data\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.607914 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdsz4\" (UniqueName: \"kubernetes.io/projected/bf9d842a-9211-40df-9360-d1d46ca4b8be-kube-api-access-tdsz4\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.709313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-combined-ca-bundle\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.709360 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-db-sync-config-data\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.709387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-config-data\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.709484 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdsz4\" (UniqueName: \"kubernetes.io/projected/bf9d842a-9211-40df-9360-d1d46ca4b8be-kube-api-access-tdsz4\") pod 
\"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.714259 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-combined-ca-bundle\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.714273 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-db-sync-config-data\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.714673 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-config-data\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.745216 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdsz4\" (UniqueName: \"kubernetes.io/projected/bf9d842a-9211-40df-9360-d1d46ca4b8be-kube-api-access-tdsz4\") pod \"glance-db-sync-lxfgf\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:08 crc kubenswrapper[4884]: I1128 16:54:08.806477 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:09 crc kubenswrapper[4884]: I1128 16:54:09.353344 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-lxfgf"] Nov 28 16:54:10 crc kubenswrapper[4884]: I1128 16:54:10.247399 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lxfgf" event={"ID":"bf9d842a-9211-40df-9360-d1d46ca4b8be","Type":"ContainerStarted","Data":"620df8a6c8c494af7dee62c7ec34ca7dac620f91597094c24093d136fea78276"} Nov 28 16:54:10 crc kubenswrapper[4884]: I1128 16:54:10.247908 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lxfgf" event={"ID":"bf9d842a-9211-40df-9360-d1d46ca4b8be","Type":"ContainerStarted","Data":"d72f21e81a71e28a3f4dccd471f1adb20e89384f2853c6e9bde94b7de04bd39a"} Nov 28 16:54:10 crc kubenswrapper[4884]: I1128 16:54:10.263028 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-lxfgf" podStartSLOduration=2.263001663 podStartE2EDuration="2.263001663s" podCreationTimestamp="2025-11-28 16:54:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:10.2604396 +0000 UTC m=+5689.823223411" watchObservedRunningTime="2025-11-28 16:54:10.263001663 +0000 UTC m=+5689.825785464" Nov 28 16:54:13 crc kubenswrapper[4884]: I1128 16:54:13.291330 4884 generic.go:334] "Generic (PLEG): container finished" podID="bf9d842a-9211-40df-9360-d1d46ca4b8be" containerID="620df8a6c8c494af7dee62c7ec34ca7dac620f91597094c24093d136fea78276" exitCode=0 Nov 28 16:54:13 crc kubenswrapper[4884]: I1128 16:54:13.291424 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lxfgf" 
event={"ID":"bf9d842a-9211-40df-9360-d1d46ca4b8be","Type":"ContainerDied","Data":"620df8a6c8c494af7dee62c7ec34ca7dac620f91597094c24093d136fea78276"} Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.654503 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.811519 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-db-sync-config-data\") pod \"bf9d842a-9211-40df-9360-d1d46ca4b8be\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.811884 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdsz4\" (UniqueName: \"kubernetes.io/projected/bf9d842a-9211-40df-9360-d1d46ca4b8be-kube-api-access-tdsz4\") pod \"bf9d842a-9211-40df-9360-d1d46ca4b8be\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.811961 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-combined-ca-bundle\") pod \"bf9d842a-9211-40df-9360-d1d46ca4b8be\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.812191 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-config-data\") pod \"bf9d842a-9211-40df-9360-d1d46ca4b8be\" (UID: \"bf9d842a-9211-40df-9360-d1d46ca4b8be\") " Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.818885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "bf9d842a-9211-40df-9360-d1d46ca4b8be" (UID: "bf9d842a-9211-40df-9360-d1d46ca4b8be"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.818949 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf9d842a-9211-40df-9360-d1d46ca4b8be-kube-api-access-tdsz4" (OuterVolumeSpecName: "kube-api-access-tdsz4") pod "bf9d842a-9211-40df-9360-d1d46ca4b8be" (UID: "bf9d842a-9211-40df-9360-d1d46ca4b8be"). InnerVolumeSpecName "kube-api-access-tdsz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.870410 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf9d842a-9211-40df-9360-d1d46ca4b8be" (UID: "bf9d842a-9211-40df-9360-d1d46ca4b8be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.871700 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-config-data" (OuterVolumeSpecName: "config-data") pod "bf9d842a-9211-40df-9360-d1d46ca4b8be" (UID: "bf9d842a-9211-40df-9360-d1d46ca4b8be"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.914297 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.914327 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdsz4\" (UniqueName: \"kubernetes.io/projected/bf9d842a-9211-40df-9360-d1d46ca4b8be-kube-api-access-tdsz4\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.914339 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:14 crc kubenswrapper[4884]: I1128 16:54:14.914348 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf9d842a-9211-40df-9360-d1d46ca4b8be-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.316606 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-lxfgf" event={"ID":"bf9d842a-9211-40df-9360-d1d46ca4b8be","Type":"ContainerDied","Data":"d72f21e81a71e28a3f4dccd471f1adb20e89384f2853c6e9bde94b7de04bd39a"} Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.316651 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d72f21e81a71e28a3f4dccd471f1adb20e89384f2853c6e9bde94b7de04bd39a" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.316757 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-lxfgf" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.580038 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:15 crc kubenswrapper[4884]: E1128 16:54:15.580417 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf9d842a-9211-40df-9360-d1d46ca4b8be" containerName="glance-db-sync" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.580431 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf9d842a-9211-40df-9360-d1d46ca4b8be" containerName="glance-db-sync" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.580620 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf9d842a-9211-40df-9360-d1d46ca4b8be" containerName="glance-db-sync" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.581450 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.583946 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-ww7sc" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.584161 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.584298 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.592974 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.613004 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.627927 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-config-data\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.628031 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.628118 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-scripts\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.628186 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-ceph\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.628247 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.628309 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l92m7\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-kube-api-access-l92m7\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.628352 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-logs\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.687772 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6656ccfb9c-92sfp"] Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.690789 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.714238 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6656ccfb9c-92sfp"] Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-config-data\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730284 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730319 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-scripts\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730341 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-ceph\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730385 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730413 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l92m7\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-kube-api-access-l92m7\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730436 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-logs\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.730926 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-logs\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.731040 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.736203 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.737718 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-scripts\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.741763 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-ceph\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.758416 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l92m7\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-kube-api-access-l92m7\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.759711 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-config-data\") pod \"glance-default-external-api-0\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.828680 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.830829 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.838807 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.838950 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-dns-svc\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.839106 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-config\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.839150 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-nb\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.839310 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqvns\" (UniqueName: \"kubernetes.io/projected/16b0708b-8147-421f-a3da-5d2556683564-kube-api-access-kqvns\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.839369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-sb\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.849261 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.898511 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.941946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-ceph\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942034 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942083 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqvns\" (UniqueName: \"kubernetes.io/projected/16b0708b-8147-421f-a3da-5d2556683564-kube-api-access-kqvns\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942135 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942179 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-sb\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942206 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-dns-svc\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942255 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942287 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-config\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942333 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfqn4\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-kube-api-access-zfqn4\") pod \"glance-default-internal-api-0\" (UID: 
\"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-nb\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942381 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.942689 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-logs\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.943448 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-sb\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.943518 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-dns-svc\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.943734 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-config\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.944573 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-nb\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:15 crc kubenswrapper[4884]: I1128 16:54:15.964039 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqvns\" (UniqueName: \"kubernetes.io/projected/16b0708b-8147-421f-a3da-5d2556683564-kube-api-access-kqvns\") pod \"dnsmasq-dns-6656ccfb9c-92sfp\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") " pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.017310 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.043873 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.044202 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfqn4\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-kube-api-access-zfqn4\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.044233 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.044260 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-logs\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.044282 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-ceph\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.044337 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.044375 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.045158 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.045420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-logs\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.053289 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.053718 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.055071 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.057952 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-ceph\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.065811 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfqn4\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-kube-api-access-zfqn4\") pod \"glance-default-internal-api-0\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.185938 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.511895 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.528714 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6656ccfb9c-92sfp"] Nov 28 16:54:16 crc kubenswrapper[4884]: W1128 16:54:16.532652 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16b0708b_8147_421f_a3da_5d2556683564.slice/crio-29c960b7f6ca38245df8323508305f2c455543d4fcfb37fe7e0f1f60a26f8379 WatchSource:0}: Error finding container 29c960b7f6ca38245df8323508305f2c455543d4fcfb37fe7e0f1f60a26f8379: Status 404 returned error can't find the container with id 29c960b7f6ca38245df8323508305f2c455543d4fcfb37fe7e0f1f60a26f8379 Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.822452 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:16 crc kubenswrapper[4884]: I1128 16:54:16.899548 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:17 crc kubenswrapper[4884]: I1128 16:54:17.342396 4884 generic.go:334] "Generic (PLEG): container finished" podID="16b0708b-8147-421f-a3da-5d2556683564" containerID="330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4" exitCode=0 Nov 28 16:54:17 crc kubenswrapper[4884]: I1128 16:54:17.342471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" event={"ID":"16b0708b-8147-421f-a3da-5d2556683564","Type":"ContainerDied","Data":"330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4"} Nov 28 16:54:17 crc kubenswrapper[4884]: I1128 16:54:17.342498 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" event={"ID":"16b0708b-8147-421f-a3da-5d2556683564","Type":"ContainerStarted","Data":"29c960b7f6ca38245df8323508305f2c455543d4fcfb37fe7e0f1f60a26f8379"} Nov 28 16:54:17 crc kubenswrapper[4884]: I1128 16:54:17.346845 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f0ae183-9bf0-41f3-9330-637af9a5558e","Type":"ContainerStarted","Data":"715b9cfb1663a5a3735b49606c1f945fb6e20c75f8e8a64e7f727fadd6e4f2bf"} Nov 28 16:54:17 crc kubenswrapper[4884]: I1128 16:54:17.352348 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e992931e-dd7c-45e9-8800-e41eb0915116","Type":"ContainerStarted","Data":"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549"} Nov 28 16:54:17 crc kubenswrapper[4884]: I1128 16:54:17.352403 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e992931e-dd7c-45e9-8800-e41eb0915116","Type":"ContainerStarted","Data":"3a739209e36712bb181d582ea34dc7b5e30a65147fb4fc6502373d7bf4ea467d"} Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.361357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" event={"ID":"16b0708b-8147-421f-a3da-5d2556683564","Type":"ContainerStarted","Data":"04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42"} Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.362796 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.362815 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f0ae183-9bf0-41f3-9330-637af9a5558e","Type":"ContainerStarted","Data":"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380"} Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.362827 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f0ae183-9bf0-41f3-9330-637af9a5558e","Type":"ContainerStarted","Data":"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260"} Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.364808 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e992931e-dd7c-45e9-8800-e41eb0915116","Type":"ContainerStarted","Data":"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f"} Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.365048 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-log" containerID="cri-o://4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549" gracePeriod=30 Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.365073 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-httpd" containerID="cri-o://10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f" gracePeriod=30 Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.406930 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.406909654 podStartE2EDuration="3.406909654s" podCreationTimestamp="2025-11-28 16:54:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:18.400898597 +0000 UTC m=+5697.963682408" watchObservedRunningTime="2025-11-28 16:54:18.406909654 +0000 UTC m=+5697.969693455" Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.407105 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" podStartSLOduration=3.407079468 podStartE2EDuration="3.407079468s" podCreationTimestamp="2025-11-28 16:54:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:18.380743374 +0000 UTC m=+5697.943527175" watchObservedRunningTime="2025-11-28 16:54:18.407079468 +0000 UTC m=+5697.969863269" Nov 28 16:54:18 crc kubenswrapper[4884]: I1128 16:54:18.429852 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.429831916 podStartE2EDuration="3.429831916s" podCreationTimestamp="2025-11-28 16:54:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:18.421317526 +0000 UTC m=+5697.984101327" watchObservedRunningTime="2025-11-28 16:54:18.429831916 +0000 UTC m=+5697.992615707" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.013597 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.104278 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230033 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-httpd-run\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230225 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-combined-ca-bundle\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230289 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-scripts\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230311 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-config-data\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230335 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l92m7\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-kube-api-access-l92m7\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230371 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-ceph\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.230603 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-logs\") pod \"e992931e-dd7c-45e9-8800-e41eb0915116\" (UID: \"e992931e-dd7c-45e9-8800-e41eb0915116\") " Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.231129 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-logs" (OuterVolumeSpecName: "logs") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.231431 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.231567 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.241289 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-scripts" (OuterVolumeSpecName: "scripts") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.241414 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-ceph" (OuterVolumeSpecName: "ceph") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.241477 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-kube-api-access-l92m7" (OuterVolumeSpecName: "kube-api-access-l92m7") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "kube-api-access-l92m7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.263604 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.287176 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-config-data" (OuterVolumeSpecName: "config-data") pod "e992931e-dd7c-45e9-8800-e41eb0915116" (UID: "e992931e-dd7c-45e9-8800-e41eb0915116"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.333453 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.333504 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l92m7\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-kube-api-access-l92m7\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.333519 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/e992931e-dd7c-45e9-8800-e41eb0915116-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.333534 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e992931e-dd7c-45e9-8800-e41eb0915116-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.333550 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.333561 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e992931e-dd7c-45e9-8800-e41eb0915116-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.377602 4884 generic.go:334] "Generic (PLEG): container finished" podID="e992931e-dd7c-45e9-8800-e41eb0915116" containerID="10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f" exitCode=0 Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.378578 4884 generic.go:334] "Generic (PLEG): container finished" podID="e992931e-dd7c-45e9-8800-e41eb0915116" containerID="4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549" exitCode=143 Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.377675 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.377658 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e992931e-dd7c-45e9-8800-e41eb0915116","Type":"ContainerDied","Data":"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f"} Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.378806 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e992931e-dd7c-45e9-8800-e41eb0915116","Type":"ContainerDied","Data":"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549"} Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.378831 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e992931e-dd7c-45e9-8800-e41eb0915116","Type":"ContainerDied","Data":"3a739209e36712bb181d582ea34dc7b5e30a65147fb4fc6502373d7bf4ea467d"} Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.378855 4884 scope.go:117] "RemoveContainer" containerID="10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.446618 4884 scope.go:117] "RemoveContainer" containerID="4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.450468 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.475475 4884 scope.go:117] "RemoveContainer" containerID="10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f" Nov 28 16:54:19 crc kubenswrapper[4884]: E1128 16:54:19.479103 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f\": container with ID starting with 10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f not found: ID does not exist" containerID="10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.479154 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f"} err="failed to get container status \"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f\": rpc error: code = NotFound desc = could not find container \"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f\": container with ID starting with 10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f not found: ID does not exist" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.479185 4884 scope.go:117] "RemoveContainer" containerID="4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.479283 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:19 crc kubenswrapper[4884]: E1128 16:54:19.479688 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549\": container with ID starting with 4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549 not found: ID does not exist" 
containerID="4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.479749 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549"} err="failed to get container status \"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549\": rpc error: code = NotFound desc = could not find container \"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549\": container with ID starting with 4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549 not found: ID does not exist" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.479782 4884 scope.go:117] "RemoveContainer" containerID="10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.480082 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f"} err="failed to get container status \"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f\": rpc error: code = NotFound desc = could not find container \"10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f\": container with ID starting with 10cef075c26da05b9e6d9dbef13b71829e49e3a91f77beb4f35c5c584cd8f72f not found: ID does not exist" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.480129 4884 scope.go:117] "RemoveContainer" containerID="4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.480469 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549"} err="failed to get container status \"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549\": rpc error: code = NotFound desc = could not find container \"4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549\": container with ID starting with 4920f4cbe871cc55c9c8d04c2b4647f34ac9b6ebb6204158e63077d1b0961549 not found: ID does not exist" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.488739 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:19 crc kubenswrapper[4884]: E1128 16:54:19.489244 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-httpd" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.489274 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-httpd" Nov 28 16:54:19 crc kubenswrapper[4884]: E1128 16:54:19.489336 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-log" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.489346 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-log" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.489550 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-httpd" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.489601 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" containerName="glance-log" 
Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.490904 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.494278 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.501777 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647028 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647079 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647129 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-ceph\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647329 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnlcj\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-kube-api-access-mnlcj\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647557 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.647613 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-logs\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749443 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-ceph\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749541 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnlcj\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-kube-api-access-mnlcj\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749639 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749664 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-logs\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749758 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.749787 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.750417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.751374 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-logs\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.755604 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.756080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-ceph\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.756742 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-config-data\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.756912 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-scripts\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.767574 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnlcj\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-kube-api-access-mnlcj\") pod \"glance-default-external-api-0\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " pod="openstack/glance-default-external-api-0" Nov 28 16:54:19 crc kubenswrapper[4884]: I1128 16:54:19.809903 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 16:54:20 crc kubenswrapper[4884]: W1128 16:54:20.320564 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f05bc4a_3dba_42a9_b784_7ff51f6b078d.slice/crio-75b28bf636b183561d3131047d061f7dcbf567111537dead09a1466b6498268e WatchSource:0}: Error finding container 75b28bf636b183561d3131047d061f7dcbf567111537dead09a1466b6498268e: Status 404 returned error can't find the container with id 75b28bf636b183561d3131047d061f7dcbf567111537dead09a1466b6498268e Nov 28 16:54:20 crc kubenswrapper[4884]: I1128 16:54:20.321024 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 16:54:20 crc kubenswrapper[4884]: I1128 16:54:20.390614 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f05bc4a-3dba-42a9-b784-7ff51f6b078d","Type":"ContainerStarted","Data":"75b28bf636b183561d3131047d061f7dcbf567111537dead09a1466b6498268e"} Nov 28 16:54:20 crc kubenswrapper[4884]: I1128 16:54:20.390950 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-log" containerID="cri-o://14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260" gracePeriod=30 Nov 28 16:54:20 crc kubenswrapper[4884]: I1128 16:54:20.391264 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-httpd" containerID="cri-o://7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380" gracePeriod=30 Nov 28 16:54:20 crc 
kubenswrapper[4884]: I1128 16:54:20.713810 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e992931e-dd7c-45e9-8800-e41eb0915116" path="/var/lib/kubelet/pods/e992931e-dd7c-45e9-8800-e41eb0915116/volumes" Nov 28 16:54:20 crc kubenswrapper[4884]: I1128 16:54:20.882573 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.075480 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-scripts\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.075848 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-logs\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.075887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfqn4\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-kube-api-access-zfqn4\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.075934 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-combined-ca-bundle\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.075959 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-config-data\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.076133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-ceph\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.076162 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-httpd-run\") pod \"9f0ae183-9bf0-41f3-9330-637af9a5558e\" (UID: \"9f0ae183-9bf0-41f3-9330-637af9a5558e\") " Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.076223 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-logs" (OuterVolumeSpecName: "logs") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.076812 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.077827 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.077861 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9f0ae183-9bf0-41f3-9330-637af9a5558e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.080214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-scripts" (OuterVolumeSpecName: "scripts") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.080302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-kube-api-access-zfqn4" (OuterVolumeSpecName: "kube-api-access-zfqn4") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "kube-api-access-zfqn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.081453 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-ceph" (OuterVolumeSpecName: "ceph") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.105549 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.127672 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-config-data" (OuterVolumeSpecName: "config-data") pod "9f0ae183-9bf0-41f3-9330-637af9a5558e" (UID: "9f0ae183-9bf0-41f3-9330-637af9a5558e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.179155 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.179191 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfqn4\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-kube-api-access-zfqn4\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.179204 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.179215 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f0ae183-9bf0-41f3-9330-637af9a5558e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.179225 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9f0ae183-9bf0-41f3-9330-637af9a5558e-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400316 4884 generic.go:334] "Generic (PLEG): container finished" podID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerID="7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380" exitCode=0 Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400368 4884 generic.go:334] "Generic (PLEG): container finished" podID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerID="14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260" exitCode=143 Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400403 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400407 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f0ae183-9bf0-41f3-9330-637af9a5558e","Type":"ContainerDied","Data":"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380"} Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400557 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f0ae183-9bf0-41f3-9330-637af9a5558e","Type":"ContainerDied","Data":"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260"} Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"9f0ae183-9bf0-41f3-9330-637af9a5558e","Type":"ContainerDied","Data":"715b9cfb1663a5a3735b49606c1f945fb6e20c75f8e8a64e7f727fadd6e4f2bf"} Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.400643 4884 scope.go:117] "RemoveContainer" containerID="7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.403490 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f05bc4a-3dba-42a9-b784-7ff51f6b078d","Type":"ContainerStarted","Data":"db76e2b33ac3a57a0cd8b028e01644f3dd2972554275c793bebf94fae884c6a7"} Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.403566 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f05bc4a-3dba-42a9-b784-7ff51f6b078d","Type":"ContainerStarted","Data":"0297fac66f6f8eda976c4bbc4c1711d9be1fa6137e73637339b9197cfc0a4ea5"} Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.428671 4884 scope.go:117] "RemoveContainer" containerID="14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.443602 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=2.443578915 podStartE2EDuration="2.443578915s" podCreationTimestamp="2025-11-28 16:54:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:21.421156386 +0000 UTC m=+5700.983940217" watchObservedRunningTime="2025-11-28 16:54:21.443578915 +0000 UTC m=+5701.006362716" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.475911 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.477243 4884 scope.go:117] "RemoveContainer" containerID="7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380" Nov 28 16:54:21 crc kubenswrapper[4884]: E1128 16:54:21.479272 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380\": container with ID starting with 7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380 not found: ID does not exist" containerID="7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.479314 4884 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380"} err="failed to get container status \"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380\": rpc error: code = NotFound desc = could not find container \"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380\": container with ID starting with 7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380 not found: ID does not exist" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.479342 4884 scope.go:117] "RemoveContainer" containerID="14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260" Nov 28 16:54:21 crc kubenswrapper[4884]: E1128 16:54:21.480254 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260\": container with ID starting with 14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260 not found: ID does not exist" containerID="14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.480296 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260"} err="failed to get container status \"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260\": rpc error: code = NotFound desc = could not find container \"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260\": container with ID starting with 14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260 not found: ID does not exist" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.480314 4884 scope.go:117] "RemoveContainer" containerID="7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.482017 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380"} err="failed to get container status \"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380\": rpc error: code = NotFound desc = could not find container \"7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380\": container with ID starting with 7b01e54e088cc71fff9dd785f9dd7936edc01a248c609fb61d5489b92f774380 not found: ID does not exist" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.482066 4884 scope.go:117] "RemoveContainer" containerID="14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.483423 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.488571 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260"} err="failed to get container status \"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260\": rpc error: code = NotFound desc = could not find container \"14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260\": container with ID starting with 14abb894b07deec1517f7c0cfe94cc101f36bc5c43377b04e88ceab4f78fb260 not found: ID does not exist" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.492040 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:21 crc kubenswrapper[4884]: E1128 16:54:21.492472 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-log" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.492492 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-log" Nov 28 16:54:21 crc kubenswrapper[4884]: E1128 16:54:21.492516 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-httpd" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.492524 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-httpd" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.492703 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-httpd" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.492728 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" containerName="glance-log" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.493682 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.495659 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.500927 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.586669 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.586815 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.586934 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.586997 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.587220 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.587328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.587363 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhrxk\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-kube-api-access-bhrxk\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.688642 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.688700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.688747 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.688772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.688790 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhrxk\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-kube-api-access-bhrxk\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.689557 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.689664 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.689747 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.690192 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.694875 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.694959 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.695726 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.699187 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.707396 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhrxk\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-kube-api-access-bhrxk\") pod \"glance-default-internal-api-0\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " pod="openstack/glance-default-internal-api-0" Nov 28 16:54:21 crc kubenswrapper[4884]: I1128 16:54:21.813130 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:22 crc kubenswrapper[4884]: I1128 16:54:22.341759 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 16:54:22 crc kubenswrapper[4884]: W1128 16:54:22.345260 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5281e6a8_491d_4814_93e9_b93604eeb4a6.slice/crio-c33b6d96e9f0b25ea6149f193f59862be404e5f4f845aa746f66034d9beb7bd1 WatchSource:0}: Error finding container c33b6d96e9f0b25ea6149f193f59862be404e5f4f845aa746f66034d9beb7bd1: Status 404 returned error can't find the container with id c33b6d96e9f0b25ea6149f193f59862be404e5f4f845aa746f66034d9beb7bd1 Nov 28 16:54:22 crc kubenswrapper[4884]: I1128 16:54:22.423451 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5281e6a8-491d-4814-93e9-b93604eeb4a6","Type":"ContainerStarted","Data":"c33b6d96e9f0b25ea6149f193f59862be404e5f4f845aa746f66034d9beb7bd1"} Nov 28 16:54:22 crc kubenswrapper[4884]: I1128 16:54:22.699332 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f0ae183-9bf0-41f3-9330-637af9a5558e" path="/var/lib/kubelet/pods/9f0ae183-9bf0-41f3-9330-637af9a5558e/volumes" Nov 28 16:54:23 crc kubenswrapper[4884]: I1128 16:54:23.433964 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5281e6a8-491d-4814-93e9-b93604eeb4a6","Type":"ContainerStarted","Data":"d05f78462ca296c523d1efae6ccf83fbeda3fad2ac62f298320d0f376a46123c"} Nov 28 16:54:23 crc kubenswrapper[4884]: I1128 16:54:23.434365 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5281e6a8-491d-4814-93e9-b93604eeb4a6","Type":"ContainerStarted","Data":"0f956f1363b968b23f07aa39f48df67e0908b751738b327949b2377feee988b1"} Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.020126 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.036953 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.03693143 podStartE2EDuration="5.03693143s" podCreationTimestamp="2025-11-28 16:54:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:23.45266552 +0000 UTC m=+5703.015449331" watchObservedRunningTime="2025-11-28 16:54:26.03693143 +0000 UTC m=+5705.599715231" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.128652 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bdbb6684f-j2ct2"] Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.128898 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerName="dnsmasq-dns" containerID="cri-o://361015a8eda54c7dad56a72de8e43885c3ae50ae354e18f2d740bbbc8a0639e3" gracePeriod=10 Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.466342 4884 generic.go:334] "Generic (PLEG): container finished" podID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerID="361015a8eda54c7dad56a72de8e43885c3ae50ae354e18f2d740bbbc8a0639e3" exitCode=0 Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 
16:54:26.466388 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" event={"ID":"ff557219-e683-4670-a0b1-d2d1ee818a97","Type":"ContainerDied","Data":"361015a8eda54c7dad56a72de8e43885c3ae50ae354e18f2d740bbbc8a0639e3"} Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.597414 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.689828 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-config\") pod \"ff557219-e683-4670-a0b1-d2d1ee818a97\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.689899 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vjpz\" (UniqueName: \"kubernetes.io/projected/ff557219-e683-4670-a0b1-d2d1ee818a97-kube-api-access-2vjpz\") pod \"ff557219-e683-4670-a0b1-d2d1ee818a97\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.690122 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-dns-svc\") pod \"ff557219-e683-4670-a0b1-d2d1ee818a97\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.690307 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-nb\") pod \"ff557219-e683-4670-a0b1-d2d1ee818a97\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.690346 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-sb\") pod \"ff557219-e683-4670-a0b1-d2d1ee818a97\" (UID: \"ff557219-e683-4670-a0b1-d2d1ee818a97\") " Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.697557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff557219-e683-4670-a0b1-d2d1ee818a97-kube-api-access-2vjpz" (OuterVolumeSpecName: "kube-api-access-2vjpz") pod "ff557219-e683-4670-a0b1-d2d1ee818a97" (UID: "ff557219-e683-4670-a0b1-d2d1ee818a97"). InnerVolumeSpecName "kube-api-access-2vjpz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.734673 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ff557219-e683-4670-a0b1-d2d1ee818a97" (UID: "ff557219-e683-4670-a0b1-d2d1ee818a97"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.734977 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-config" (OuterVolumeSpecName: "config") pod "ff557219-e683-4670-a0b1-d2d1ee818a97" (UID: "ff557219-e683-4670-a0b1-d2d1ee818a97"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.738758 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff557219-e683-4670-a0b1-d2d1ee818a97" (UID: "ff557219-e683-4670-a0b1-d2d1ee818a97"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.765636 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ff557219-e683-4670-a0b1-d2d1ee818a97" (UID: "ff557219-e683-4670-a0b1-d2d1ee818a97"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.793122 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.793161 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.793176 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.793189 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff557219-e683-4670-a0b1-d2d1ee818a97-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:26 crc kubenswrapper[4884]: I1128 16:54:26.793201 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vjpz\" (UniqueName: \"kubernetes.io/projected/ff557219-e683-4670-a0b1-d2d1ee818a97-kube-api-access-2vjpz\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:27 crc kubenswrapper[4884]: I1128 16:54:27.476779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" event={"ID":"ff557219-e683-4670-a0b1-d2d1ee818a97","Type":"ContainerDied","Data":"a3b19121a24ab849a24e3fed77abf86c50eec70355d923acb33f4809c857809c"} Nov 28 16:54:27 crc kubenswrapper[4884]: I1128 16:54:27.476831 4884 scope.go:117] "RemoveContainer" containerID="361015a8eda54c7dad56a72de8e43885c3ae50ae354e18f2d740bbbc8a0639e3" Nov 28 16:54:27 crc kubenswrapper[4884]: I1128 16:54:27.476861 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bdbb6684f-j2ct2" Nov 28 16:54:27 crc kubenswrapper[4884]: I1128 16:54:27.517129 4884 scope.go:117] "RemoveContainer" containerID="db4fb494ba312c7d21398c3bd60308601da64ef3ef7e97e615dbdcb4f819b96c" Nov 28 16:54:27 crc kubenswrapper[4884]: I1128 16:54:27.524313 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bdbb6684f-j2ct2"] Nov 28 16:54:27 crc kubenswrapper[4884]: I1128 16:54:27.533285 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bdbb6684f-j2ct2"] Nov 28 16:54:28 crc kubenswrapper[4884]: I1128 16:54:28.700253 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" path="/var/lib/kubelet/pods/ff557219-e683-4670-a0b1-d2d1ee818a97/volumes" Nov 28 16:54:29 crc kubenswrapper[4884]: I1128 16:54:29.811008 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 16:54:29 crc kubenswrapper[4884]: I1128 16:54:29.811560 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 16:54:29 crc kubenswrapper[4884]: I1128 16:54:29.842528 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 16:54:29 crc kubenswrapper[4884]: I1128 16:54:29.850066 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 16:54:30 crc kubenswrapper[4884]: I1128 16:54:30.044233 4884 scope.go:117] "RemoveContainer" containerID="a246be2c484945847e845a62d246895251e6c947dff1e9d067a951464e2d6d18" Nov 28 16:54:30 crc kubenswrapper[4884]: I1128 16:54:30.064780 4884 scope.go:117] "RemoveContainer" containerID="fc25f88e7c9fda9926c782282192c5cd1028c84c5f1bd33e470b1a1c7870b5c8" Nov 28 16:54:30 crc kubenswrapper[4884]: I1128 16:54:30.511043 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:54:30 crc kubenswrapper[4884]: I1128 16:54:30.511103 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 16:54:31 crc kubenswrapper[4884]: I1128 16:54:31.813472 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:31 crc kubenswrapper[4884]: I1128 16:54:31.813795 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:31 crc kubenswrapper[4884]: I1128 16:54:31.841347 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:31 crc kubenswrapper[4884]: I1128 16:54:31.851222 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:32 crc kubenswrapper[4884]: I1128 16:54:32.524483 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:32 crc kubenswrapper[4884]: I1128 16:54:32.524534 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:32 crc kubenswrapper[4884]: I1128 16:54:32.565158 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" 
Nov 28 16:54:32 crc kubenswrapper[4884]: I1128 16:54:32.565264 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:54:32 crc kubenswrapper[4884]: I1128 16:54:32.603648 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 16:54:34 crc kubenswrapper[4884]: I1128 16:54:34.631702 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:34 crc kubenswrapper[4884]: I1128 16:54:34.632202 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 16:54:34 crc kubenswrapper[4884]: I1128 16:54:34.645349 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.472235 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-wjsqr"] Nov 28 16:54:40 crc kubenswrapper[4884]: E1128 16:54:40.473248 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerName="init" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.473264 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerName="init" Nov 28 16:54:40 crc kubenswrapper[4884]: E1128 16:54:40.473292 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerName="dnsmasq-dns" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.473299 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerName="dnsmasq-dns" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.473502 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff557219-e683-4670-a0b1-d2d1ee818a97" containerName="dnsmasq-dns" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.474286 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.483154 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-wjsqr"] Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.576073 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2zbx\" (UniqueName: \"kubernetes.io/projected/5a118041-0fdb-4d62-b06c-88ae23dde1c9-kube-api-access-w2zbx\") pod \"placement-db-create-wjsqr\" (UID: \"5a118041-0fdb-4d62-b06c-88ae23dde1c9\") " pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.677837 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2zbx\" (UniqueName: \"kubernetes.io/projected/5a118041-0fdb-4d62-b06c-88ae23dde1c9-kube-api-access-w2zbx\") pod \"placement-db-create-wjsqr\" (UID: \"5a118041-0fdb-4d62-b06c-88ae23dde1c9\") " pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.698904 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2zbx\" (UniqueName: \"kubernetes.io/projected/5a118041-0fdb-4d62-b06c-88ae23dde1c9-kube-api-access-w2zbx\") pod \"placement-db-create-wjsqr\" (UID: \"5a118041-0fdb-4d62-b06c-88ae23dde1c9\") " pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:40 crc kubenswrapper[4884]: I1128 16:54:40.793844 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:41 crc kubenswrapper[4884]: I1128 16:54:41.253821 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-wjsqr"] Nov 28 16:54:41 crc kubenswrapper[4884]: W1128 16:54:41.254192 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a118041_0fdb_4d62_b06c_88ae23dde1c9.slice/crio-a83c6868a392c0e806eb8096aa40f70daa68f529be1733e6b3df4a4f6e5786e9 WatchSource:0}: Error finding container a83c6868a392c0e806eb8096aa40f70daa68f529be1733e6b3df4a4f6e5786e9: Status 404 returned error can't find the container with id a83c6868a392c0e806eb8096aa40f70daa68f529be1733e6b3df4a4f6e5786e9 Nov 28 16:54:41 crc kubenswrapper[4884]: I1128 16:54:41.611352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wjsqr" event={"ID":"5a118041-0fdb-4d62-b06c-88ae23dde1c9","Type":"ContainerStarted","Data":"a83c6868a392c0e806eb8096aa40f70daa68f529be1733e6b3df4a4f6e5786e9"} Nov 28 16:54:42 crc kubenswrapper[4884]: I1128 16:54:42.626022 4884 generic.go:334] "Generic (PLEG): container finished" podID="5a118041-0fdb-4d62-b06c-88ae23dde1c9" containerID="85a491cbcb1c71b0f2815f15062500a70ce51e676422cd6060b28bacaad6b45b" exitCode=0 Nov 28 16:54:42 crc kubenswrapper[4884]: I1128 16:54:42.626081 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wjsqr" event={"ID":"5a118041-0fdb-4d62-b06c-88ae23dde1c9","Type":"ContainerDied","Data":"85a491cbcb1c71b0f2815f15062500a70ce51e676422cd6060b28bacaad6b45b"} Nov 28 16:54:43 crc kubenswrapper[4884]: I1128 16:54:43.994968 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:44 crc kubenswrapper[4884]: I1128 16:54:44.143631 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2zbx\" (UniqueName: \"kubernetes.io/projected/5a118041-0fdb-4d62-b06c-88ae23dde1c9-kube-api-access-w2zbx\") pod \"5a118041-0fdb-4d62-b06c-88ae23dde1c9\" (UID: \"5a118041-0fdb-4d62-b06c-88ae23dde1c9\") " Nov 28 16:54:44 crc kubenswrapper[4884]: I1128 16:54:44.149339 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a118041-0fdb-4d62-b06c-88ae23dde1c9-kube-api-access-w2zbx" (OuterVolumeSpecName: "kube-api-access-w2zbx") pod "5a118041-0fdb-4d62-b06c-88ae23dde1c9" (UID: "5a118041-0fdb-4d62-b06c-88ae23dde1c9"). InnerVolumeSpecName "kube-api-access-w2zbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:44 crc kubenswrapper[4884]: I1128 16:54:44.245168 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2zbx\" (UniqueName: \"kubernetes.io/projected/5a118041-0fdb-4d62-b06c-88ae23dde1c9-kube-api-access-w2zbx\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:44 crc kubenswrapper[4884]: I1128 16:54:44.645190 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wjsqr" event={"ID":"5a118041-0fdb-4d62-b06c-88ae23dde1c9","Type":"ContainerDied","Data":"a83c6868a392c0e806eb8096aa40f70daa68f529be1733e6b3df4a4f6e5786e9"} Nov 28 16:54:44 crc kubenswrapper[4884]: I1128 16:54:44.645459 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a83c6868a392c0e806eb8096aa40f70daa68f529be1733e6b3df4a4f6e5786e9" Nov 28 16:54:44 crc kubenswrapper[4884]: I1128 16:54:44.645267 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wjsqr" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.522406 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-03c4-account-create-bn9g7"] Nov 28 16:54:50 crc kubenswrapper[4884]: E1128 16:54:50.523247 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a118041-0fdb-4d62-b06c-88ae23dde1c9" containerName="mariadb-database-create" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.523258 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a118041-0fdb-4d62-b06c-88ae23dde1c9" containerName="mariadb-database-create" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.523419 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a118041-0fdb-4d62-b06c-88ae23dde1c9" containerName="mariadb-database-create" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.524074 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.525747 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.545265 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-03c4-account-create-bn9g7"] Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.655511 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqrbg\" (UniqueName: \"kubernetes.io/projected/2b58ca55-9063-4455-a802-634f36055904-kube-api-access-gqrbg\") pod \"placement-03c4-account-create-bn9g7\" (UID: \"2b58ca55-9063-4455-a802-634f36055904\") " pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.756949 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqrbg\" (UniqueName: \"kubernetes.io/projected/2b58ca55-9063-4455-a802-634f36055904-kube-api-access-gqrbg\") pod \"placement-03c4-account-create-bn9g7\" (UID: \"2b58ca55-9063-4455-a802-634f36055904\") " pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.777995 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqrbg\" (UniqueName: \"kubernetes.io/projected/2b58ca55-9063-4455-a802-634f36055904-kube-api-access-gqrbg\") pod \"placement-03c4-account-create-bn9g7\" (UID: \"2b58ca55-9063-4455-a802-634f36055904\") " pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:50 crc kubenswrapper[4884]: I1128 16:54:50.840616 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:51 crc kubenswrapper[4884]: I1128 16:54:51.242951 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:54:51 crc kubenswrapper[4884]: I1128 16:54:51.243458 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:54:51 crc kubenswrapper[4884]: I1128 16:54:51.249605 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-03c4-account-create-bn9g7"] Nov 28 16:54:51 crc kubenswrapper[4884]: W1128 16:54:51.255363 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b58ca55_9063_4455_a802_634f36055904.slice/crio-4e589e02089e7a2f0a672e1ddc1dfa0c95b276c15876e41fe7708812e094317d WatchSource:0}: Error finding container 4e589e02089e7a2f0a672e1ddc1dfa0c95b276c15876e41fe7708812e094317d: Status 404 returned error can't find the container with id 4e589e02089e7a2f0a672e1ddc1dfa0c95b276c15876e41fe7708812e094317d Nov 28 16:54:51 crc kubenswrapper[4884]: I1128 16:54:51.703311 4884 generic.go:334] "Generic (PLEG): container finished" podID="2b58ca55-9063-4455-a802-634f36055904" containerID="b24658c84ba428d1972ead9ec937d2cf25ebf0e12939f0ecfad11f82bc3dc6fb" exitCode=0 Nov 28 16:54:51 crc kubenswrapper[4884]: I1128 16:54:51.704343 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-03c4-account-create-bn9g7" event={"ID":"2b58ca55-9063-4455-a802-634f36055904","Type":"ContainerDied","Data":"b24658c84ba428d1972ead9ec937d2cf25ebf0e12939f0ecfad11f82bc3dc6fb"} Nov 28 16:54:51 crc kubenswrapper[4884]: I1128 16:54:51.704451 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-03c4-account-create-bn9g7" event={"ID":"2b58ca55-9063-4455-a802-634f36055904","Type":"ContainerStarted","Data":"4e589e02089e7a2f0a672e1ddc1dfa0c95b276c15876e41fe7708812e094317d"} Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.071368 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.202017 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqrbg\" (UniqueName: \"kubernetes.io/projected/2b58ca55-9063-4455-a802-634f36055904-kube-api-access-gqrbg\") pod \"2b58ca55-9063-4455-a802-634f36055904\" (UID: \"2b58ca55-9063-4455-a802-634f36055904\") " Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.221281 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b58ca55-9063-4455-a802-634f36055904-kube-api-access-gqrbg" (OuterVolumeSpecName: "kube-api-access-gqrbg") pod "2b58ca55-9063-4455-a802-634f36055904" (UID: "2b58ca55-9063-4455-a802-634f36055904"). InnerVolumeSpecName "kube-api-access-gqrbg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.303634 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqrbg\" (UniqueName: \"kubernetes.io/projected/2b58ca55-9063-4455-a802-634f36055904-kube-api-access-gqrbg\") on node \"crc\" DevicePath \"\"" Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.720133 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-03c4-account-create-bn9g7" event={"ID":"2b58ca55-9063-4455-a802-634f36055904","Type":"ContainerDied","Data":"4e589e02089e7a2f0a672e1ddc1dfa0c95b276c15876e41fe7708812e094317d"} Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.720181 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e589e02089e7a2f0a672e1ddc1dfa0c95b276c15876e41fe7708812e094317d" Nov 28 16:54:53 crc kubenswrapper[4884]: I1128 16:54:53.720215 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-03c4-account-create-bn9g7" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.747809 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55778596c9-v9n6n"] Nov 28 16:54:55 crc kubenswrapper[4884]: E1128 16:54:55.753817 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b58ca55-9063-4455-a802-634f36055904" containerName="mariadb-account-create" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.753853 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b58ca55-9063-4455-a802-634f36055904" containerName="mariadb-account-create" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.754112 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b58ca55-9063-4455-a802-634f36055904" containerName="mariadb-account-create" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.755659 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.766137 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55778596c9-v9n6n"] Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.792888 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-2bl49"] Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.794292 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.797458 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.797610 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mn5cs" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.798295 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.809770 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-2bl49"] Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.854976 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-sb\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855033 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-config\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855056 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-scripts\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855123 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-config-data\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855154 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-combined-ca-bundle\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855199 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-nb\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpm8z\" (UniqueName: \"kubernetes.io/projected/810a04af-eb91-4371-8b43-a1b733bef247-kube-api-access-kpm8z\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc 
kubenswrapper[4884]: I1128 16:54:55.855239 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/829cdd1b-9af4-4f6f-bf15-75b8208249e6-logs\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855283 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-dns-svc\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.855301 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2skk\" (UniqueName: \"kubernetes.io/projected/829cdd1b-9af4-4f6f-bf15-75b8208249e6-kube-api-access-n2skk\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957542 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-config-data\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957607 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-combined-ca-bundle\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957642 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-nb\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957658 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpm8z\" (UniqueName: \"kubernetes.io/projected/810a04af-eb91-4371-8b43-a1b733bef247-kube-api-access-kpm8z\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957687 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/829cdd1b-9af4-4f6f-bf15-75b8208249e6-logs\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957715 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-dns-svc\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957737 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-n2skk\" (UniqueName: \"kubernetes.io/projected/829cdd1b-9af4-4f6f-bf15-75b8208249e6-kube-api-access-n2skk\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957802 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-sb\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957821 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-config\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.957834 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-scripts\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.959607 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-config\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.959640 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-dns-svc\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.959651 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-sb\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.959988 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/829cdd1b-9af4-4f6f-bf15-75b8208249e6-logs\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.961114 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-nb\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.963069 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-combined-ca-bundle\") pod \"placement-db-sync-2bl49\" 
(UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.963208 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-config-data\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.963518 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-scripts\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.975760 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2skk\" (UniqueName: \"kubernetes.io/projected/829cdd1b-9af4-4f6f-bf15-75b8208249e6-kube-api-access-n2skk\") pod \"placement-db-sync-2bl49\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:55 crc kubenswrapper[4884]: I1128 16:54:55.980228 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpm8z\" (UniqueName: \"kubernetes.io/projected/810a04af-eb91-4371-8b43-a1b733bef247-kube-api-access-kpm8z\") pod \"dnsmasq-dns-55778596c9-v9n6n\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:56 crc kubenswrapper[4884]: I1128 16:54:56.151350 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:56 crc kubenswrapper[4884]: I1128 16:54:56.165418 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-2bl49" Nov 28 16:54:56 crc kubenswrapper[4884]: I1128 16:54:56.613267 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-2bl49"] Nov 28 16:54:56 crc kubenswrapper[4884]: I1128 16:54:56.697759 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55778596c9-v9n6n"] Nov 28 16:54:56 crc kubenswrapper[4884]: W1128 16:54:56.705528 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod810a04af_eb91_4371_8b43_a1b733bef247.slice/crio-37fa3d27756dd3fb44af29246a7d15577513def9438cd8b6d69ca991e8ffe65b WatchSource:0}: Error finding container 37fa3d27756dd3fb44af29246a7d15577513def9438cd8b6d69ca991e8ffe65b: Status 404 returned error can't find the container with id 37fa3d27756dd3fb44af29246a7d15577513def9438cd8b6d69ca991e8ffe65b Nov 28 16:54:56 crc kubenswrapper[4884]: I1128 16:54:56.754768 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" event={"ID":"810a04af-eb91-4371-8b43-a1b733bef247","Type":"ContainerStarted","Data":"37fa3d27756dd3fb44af29246a7d15577513def9438cd8b6d69ca991e8ffe65b"} Nov 28 16:54:56 crc kubenswrapper[4884]: I1128 16:54:56.756661 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2bl49" event={"ID":"829cdd1b-9af4-4f6f-bf15-75b8208249e6","Type":"ContainerStarted","Data":"20ac289d1f3d78e5671ee901344b4836b054f8dc0019525633786cf41b694762"} Nov 28 16:54:57 crc kubenswrapper[4884]: I1128 16:54:57.771058 4884 generic.go:334] "Generic (PLEG): container finished" podID="810a04af-eb91-4371-8b43-a1b733bef247" containerID="cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579" exitCode=0 Nov 28 16:54:57 crc kubenswrapper[4884]: I1128 16:54:57.771175 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" event={"ID":"810a04af-eb91-4371-8b43-a1b733bef247","Type":"ContainerDied","Data":"cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579"} Nov 28 16:54:57 crc kubenswrapper[4884]: I1128 16:54:57.774064 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2bl49" event={"ID":"829cdd1b-9af4-4f6f-bf15-75b8208249e6","Type":"ContainerStarted","Data":"88463fc76124fb44e166f3ca7a089dced44e596f98e9bbe7b305c4c7433677b0"} Nov 28 16:54:57 crc kubenswrapper[4884]: I1128 16:54:57.832596 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-2bl49" podStartSLOduration=2.832574324 podStartE2EDuration="2.832574324s" podCreationTimestamp="2025-11-28 16:54:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:57.825660295 +0000 UTC m=+5737.388444146" watchObservedRunningTime="2025-11-28 16:54:57.832574324 +0000 UTC m=+5737.395358135" Nov 28 16:54:58 crc kubenswrapper[4884]: I1128 16:54:58.794536 4884 generic.go:334] "Generic (PLEG): container finished" podID="829cdd1b-9af4-4f6f-bf15-75b8208249e6" containerID="88463fc76124fb44e166f3ca7a089dced44e596f98e9bbe7b305c4c7433677b0" exitCode=0 Nov 28 16:54:58 crc kubenswrapper[4884]: I1128 16:54:58.794605 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2bl49" 
event={"ID":"829cdd1b-9af4-4f6f-bf15-75b8208249e6","Type":"ContainerDied","Data":"88463fc76124fb44e166f3ca7a089dced44e596f98e9bbe7b305c4c7433677b0"} Nov 28 16:54:58 crc kubenswrapper[4884]: I1128 16:54:58.810821 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" event={"ID":"810a04af-eb91-4371-8b43-a1b733bef247","Type":"ContainerStarted","Data":"f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f"} Nov 28 16:54:58 crc kubenswrapper[4884]: I1128 16:54:58.811114 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:54:58 crc kubenswrapper[4884]: I1128 16:54:58.847418 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" podStartSLOduration=3.847402938 podStartE2EDuration="3.847402938s" podCreationTimestamp="2025-11-28 16:54:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:54:58.844719852 +0000 UTC m=+5738.407503683" watchObservedRunningTime="2025-11-28 16:54:58.847402938 +0000 UTC m=+5738.410186739" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.135656 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2bl49" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.235049 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-config-data\") pod \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.235102 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-scripts\") pod \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.235164 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-combined-ca-bundle\") pod \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.235265 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/829cdd1b-9af4-4f6f-bf15-75b8208249e6-logs\") pod \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.235327 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2skk\" (UniqueName: \"kubernetes.io/projected/829cdd1b-9af4-4f6f-bf15-75b8208249e6-kube-api-access-n2skk\") pod \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\" (UID: \"829cdd1b-9af4-4f6f-bf15-75b8208249e6\") " Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.236209 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/829cdd1b-9af4-4f6f-bf15-75b8208249e6-logs" (OuterVolumeSpecName: "logs") pod "829cdd1b-9af4-4f6f-bf15-75b8208249e6" (UID: "829cdd1b-9af4-4f6f-bf15-75b8208249e6"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.243830 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-scripts" (OuterVolumeSpecName: "scripts") pod "829cdd1b-9af4-4f6f-bf15-75b8208249e6" (UID: "829cdd1b-9af4-4f6f-bf15-75b8208249e6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.243869 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/829cdd1b-9af4-4f6f-bf15-75b8208249e6-kube-api-access-n2skk" (OuterVolumeSpecName: "kube-api-access-n2skk") pod "829cdd1b-9af4-4f6f-bf15-75b8208249e6" (UID: "829cdd1b-9af4-4f6f-bf15-75b8208249e6"). InnerVolumeSpecName "kube-api-access-n2skk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.262000 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "829cdd1b-9af4-4f6f-bf15-75b8208249e6" (UID: "829cdd1b-9af4-4f6f-bf15-75b8208249e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.271018 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-config-data" (OuterVolumeSpecName: "config-data") pod "829cdd1b-9af4-4f6f-bf15-75b8208249e6" (UID: "829cdd1b-9af4-4f6f-bf15-75b8208249e6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.336901 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.336971 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/829cdd1b-9af4-4f6f-bf15-75b8208249e6-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.336986 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2skk\" (UniqueName: \"kubernetes.io/projected/829cdd1b-9af4-4f6f-bf15-75b8208249e6-kube-api-access-n2skk\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.336998 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.337012 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/829cdd1b-9af4-4f6f-bf15-75b8208249e6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.826845 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2bl49" event={"ID":"829cdd1b-9af4-4f6f-bf15-75b8208249e6","Type":"ContainerDied","Data":"20ac289d1f3d78e5671ee901344b4836b054f8dc0019525633786cf41b694762"} Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.826881 4884 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="20ac289d1f3d78e5671ee901344b4836b054f8dc0019525633786cf41b694762" Nov 28 16:55:00 crc kubenswrapper[4884]: I1128 16:55:00.826929 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2bl49" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.238494 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-54bd44d4c6-xgndh"] Nov 28 16:55:01 crc kubenswrapper[4884]: E1128 16:55:01.238914 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="829cdd1b-9af4-4f6f-bf15-75b8208249e6" containerName="placement-db-sync" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.238932 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="829cdd1b-9af4-4f6f-bf15-75b8208249e6" containerName="placement-db-sync" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.239193 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="829cdd1b-9af4-4f6f-bf15-75b8208249e6" containerName="placement-db-sync" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.240430 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.242136 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mn5cs" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.242551 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.248860 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.255488 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-54bd44d4c6-xgndh"] Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.357231 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-combined-ca-bundle\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.357300 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pfj6\" (UniqueName: \"kubernetes.io/projected/a7acff93-9169-40ef-9c9b-85d14789248a-kube-api-access-6pfj6\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.357534 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-config-data\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.357729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-scripts\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 
16:55:01.357848 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7acff93-9169-40ef-9c9b-85d14789248a-logs\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.459462 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-combined-ca-bundle\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.459792 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pfj6\" (UniqueName: \"kubernetes.io/projected/a7acff93-9169-40ef-9c9b-85d14789248a-kube-api-access-6pfj6\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.459854 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-config-data\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.459902 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-scripts\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.459952 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7acff93-9169-40ef-9c9b-85d14789248a-logs\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.460533 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7acff93-9169-40ef-9c9b-85d14789248a-logs\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.463673 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-combined-ca-bundle\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.467440 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-scripts\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.467917 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a7acff93-9169-40ef-9c9b-85d14789248a-config-data\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.477822 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pfj6\" (UniqueName: \"kubernetes.io/projected/a7acff93-9169-40ef-9c9b-85d14789248a-kube-api-access-6pfj6\") pod \"placement-54bd44d4c6-xgndh\" (UID: \"a7acff93-9169-40ef-9c9b-85d14789248a\") " pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:01 crc kubenswrapper[4884]: I1128 16:55:01.561481 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:02 crc kubenswrapper[4884]: I1128 16:55:02.060739 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-54bd44d4c6-xgndh"] Nov 28 16:55:02 crc kubenswrapper[4884]: I1128 16:55:02.853942 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-54bd44d4c6-xgndh" event={"ID":"a7acff93-9169-40ef-9c9b-85d14789248a","Type":"ContainerStarted","Data":"cac719515f78477beaeb099722b406f341bf6a1543e4c9d5098721fd44397758"} Nov 28 16:55:02 crc kubenswrapper[4884]: I1128 16:55:02.854626 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-54bd44d4c6-xgndh" event={"ID":"a7acff93-9169-40ef-9c9b-85d14789248a","Type":"ContainerStarted","Data":"dfd101cb2c607409b22fe1732f2a11a630c6a2c0d73c24b5cc87f04c68a813be"} Nov 28 16:55:02 crc kubenswrapper[4884]: I1128 16:55:02.854649 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:02 crc kubenswrapper[4884]: I1128 16:55:02.854660 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-54bd44d4c6-xgndh" event={"ID":"a7acff93-9169-40ef-9c9b-85d14789248a","Type":"ContainerStarted","Data":"7f333dacd64324057fd24418915cb39c52da1cef8307ecb1ff0b0fd6dc52ca2a"} Nov 28 16:55:02 crc kubenswrapper[4884]: I1128 16:55:02.880684 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-54bd44d4c6-xgndh" podStartSLOduration=1.880662648 podStartE2EDuration="1.880662648s" podCreationTimestamp="2025-11-28 16:55:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:55:02.870399777 +0000 UTC m=+5742.433183588" watchObservedRunningTime="2025-11-28 16:55:02.880662648 +0000 UTC m=+5742.443446449" Nov 28 16:55:03 crc kubenswrapper[4884]: I1128 16:55:03.861962 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-54bd44d4c6-xgndh" Nov 28 16:55:06 crc kubenswrapper[4884]: I1128 16:55:06.152234 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:55:06 crc kubenswrapper[4884]: I1128 16:55:06.210848 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6656ccfb9c-92sfp"] Nov 28 16:55:06 crc kubenswrapper[4884]: I1128 16:55:06.211166 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" podUID="16b0708b-8147-421f-a3da-5d2556683564" containerName="dnsmasq-dns" containerID="cri-o://04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42" gracePeriod=10 Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.694068 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.752846 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-sb\") pod \"16b0708b-8147-421f-a3da-5d2556683564\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") "
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.752964 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqvns\" (UniqueName: \"kubernetes.io/projected/16b0708b-8147-421f-a3da-5d2556683564-kube-api-access-kqvns\") pod \"16b0708b-8147-421f-a3da-5d2556683564\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") "
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.753006 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-dns-svc\") pod \"16b0708b-8147-421f-a3da-5d2556683564\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") "
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.753030 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-config\") pod \"16b0708b-8147-421f-a3da-5d2556683564\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") "
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.753071 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-nb\") pod \"16b0708b-8147-421f-a3da-5d2556683564\" (UID: \"16b0708b-8147-421f-a3da-5d2556683564\") "
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.762324 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16b0708b-8147-421f-a3da-5d2556683564-kube-api-access-kqvns" (OuterVolumeSpecName: "kube-api-access-kqvns") pod "16b0708b-8147-421f-a3da-5d2556683564" (UID: "16b0708b-8147-421f-a3da-5d2556683564"). InnerVolumeSpecName "kube-api-access-kqvns". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.823002 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "16b0708b-8147-421f-a3da-5d2556683564" (UID: "16b0708b-8147-421f-a3da-5d2556683564"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.833441 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "16b0708b-8147-421f-a3da-5d2556683564" (UID: "16b0708b-8147-421f-a3da-5d2556683564"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.838301 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "16b0708b-8147-421f-a3da-5d2556683564" (UID: "16b0708b-8147-421f-a3da-5d2556683564"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.850264 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-config" (OuterVolumeSpecName: "config") pod "16b0708b-8147-421f-a3da-5d2556683564" (UID: "16b0708b-8147-421f-a3da-5d2556683564"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.855025 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqvns\" (UniqueName: \"kubernetes.io/projected/16b0708b-8147-421f-a3da-5d2556683564-kube-api-access-kqvns\") on node \"crc\" DevicePath \"\""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.855051 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.855062 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.855072 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.855082 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16b0708b-8147-421f-a3da-5d2556683564-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.888334 4884 generic.go:334] "Generic (PLEG): container finished" podID="16b0708b-8147-421f-a3da-5d2556683564" containerID="04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42" exitCode=0
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.888386 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" event={"ID":"16b0708b-8147-421f-a3da-5d2556683564","Type":"ContainerDied","Data":"04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42"}
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.888417 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp" event={"ID":"16b0708b-8147-421f-a3da-5d2556683564","Type":"ContainerDied","Data":"29c960b7f6ca38245df8323508305f2c455543d4fcfb37fe7e0f1f60a26f8379"}
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.888454 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6656ccfb9c-92sfp"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.888462 4884 scope.go:117] "RemoveContainer" containerID="04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.922332 4884 scope.go:117] "RemoveContainer" containerID="330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.926446 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6656ccfb9c-92sfp"]
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.937034 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6656ccfb9c-92sfp"]
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.962509 4884 scope.go:117] "RemoveContainer" containerID="04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42"
Nov 28 16:55:07 crc kubenswrapper[4884]: E1128 16:55:06.962875 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42\": container with ID starting with 04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42 not found: ID does not exist" containerID="04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.962910 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42"} err="failed to get container status \"04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42\": rpc error: code = NotFound desc = could not find container \"04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42\": container with ID starting with 04e340fbfcaabf4a386569156b0f623cd1db8129e15feb0f6ca033185f742a42 not found: ID does not exist"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.962947 4884 scope.go:117] "RemoveContainer" containerID="330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4"
Nov 28 16:55:07 crc kubenswrapper[4884]: E1128 16:55:06.963335 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4\": container with ID starting with 330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4 not found: ID does not exist" containerID="330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4"
Nov 28 16:55:07 crc kubenswrapper[4884]: I1128 16:55:06.963359 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4"} err="failed to get container status \"330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4\": rpc error: code = NotFound desc = could not find container \"330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4\": container with ID starting with 330369ceb10be045a39b1d99da95c7a1d595ea1395ec2a076e92a0b25909c7f4 not found: ID does not exist"
Nov 28 16:55:08 crc kubenswrapper[4884]: I1128 16:55:08.699007 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16b0708b-8147-421f-a3da-5d2556683564" path="/var/lib/kubelet/pods/16b0708b-8147-421f-a3da-5d2556683564/volumes"
Nov 28 16:55:21 crc kubenswrapper[4884]: I1128 16:55:21.243267 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:55:21 crc kubenswrapper[4884]: I1128 16:55:21.243882 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:55:30 crc kubenswrapper[4884]: I1128 16:55:30.209124 4884 scope.go:117] "RemoveContainer" containerID="f5e4d6b272f322b4cc40694984b9ba179a9bc9ef2a3831b53d793ae596d67a8b"
Nov 28 16:55:30 crc kubenswrapper[4884]: I1128 16:55:30.236200 4884 scope.go:117] "RemoveContainer" containerID="768cd946d195b086b527d9cb598392ceccd0270745f82dc10160d86ba39211de"
Nov 28 16:55:32 crc kubenswrapper[4884]: I1128 16:55:32.885180 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-54bd44d4c6-xgndh"
Nov 28 16:55:32 crc kubenswrapper[4884]: I1128 16:55:32.917810 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-54bd44d4c6-xgndh"
Nov 28 16:55:51 crc kubenswrapper[4884]: I1128 16:55:51.243173 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 16:55:51 crc kubenswrapper[4884]: I1128 16:55:51.243714 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 16:55:51 crc kubenswrapper[4884]: I1128 16:55:51.243761 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 16:55:51 crc kubenswrapper[4884]: I1128 16:55:51.244401 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 16:55:51 crc kubenswrapper[4884]: I1128 16:55:51.244451 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" gracePeriod=600
Nov 28 16:55:51 crc kubenswrapper[4884]: E1128 16:55:51.361751 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.277615 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" exitCode=0
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.277659 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"}
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.277708 4884 scope.go:117] "RemoveContainer" containerID="48de67cda51bf73805c75e12b81816f10effd891069b56fd2588291f4c432eee"
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.278162 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:55:52 crc kubenswrapper[4884]: E1128 16:55:52.278540 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.764722 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5mxlp"]
Nov 28 16:55:52 crc kubenswrapper[4884]: E1128 16:55:52.765650 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b0708b-8147-421f-a3da-5d2556683564" containerName="dnsmasq-dns"
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.765685 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b0708b-8147-421f-a3da-5d2556683564" containerName="dnsmasq-dns"
Nov 28 16:55:52 crc kubenswrapper[4884]: E1128 16:55:52.765708 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b0708b-8147-421f-a3da-5d2556683564" containerName="init"
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.765717 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b0708b-8147-421f-a3da-5d2556683564" containerName="init"
Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.766017 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b0708b-8147-421f-a3da-5d2556683564" containerName="dnsmasq-dns"
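[editor's note] The "back-off 5m0s restarting failed container" message above is the kubelet's restart backoff at its ceiling. The kubelet's documented behavior is to start around 10s and double up to a 5-minute cap; the constants below mirror that, but the loop itself is an illustration, not kubelet source:

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial = 10 * time.Second
		ceiling = 5 * time.Minute
	)
	d := initial
	for i := 1; i <= 7; i++ {
		fmt.Printf("restart %d: wait %v\n", i, d)
		if d *= 2; d > ceiling {
			d = ceiling // from here on every restart waits the full 5m0s, as logged
		}
	}
}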
Need to start a new one" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.774045 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mxlp"] Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.894609 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-catalog-content\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.894652 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9vxh\" (UniqueName: \"kubernetes.io/projected/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-kube-api-access-j9vxh\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.894966 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-utilities\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.996014 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-utilities\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.996125 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-catalog-content\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.996158 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9vxh\" (UniqueName: \"kubernetes.io/projected/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-kube-api-access-j9vxh\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.996615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-utilities\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:52 crc kubenswrapper[4884]: I1128 16:55:52.996663 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-catalog-content\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.017483 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j9vxh\" (UniqueName: \"kubernetes.io/projected/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-kube-api-access-j9vxh\") pod \"certified-operators-5mxlp\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.098388 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.674620 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5mxlp"] Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.915729 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-hktq5"] Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.917069 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.919525 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsmcv\" (UniqueName: \"kubernetes.io/projected/027e80a9-c0fd-47ec-8614-4ed267b8db45-kube-api-access-nsmcv\") pod \"nova-api-db-create-hktq5\" (UID: \"027e80a9-c0fd-47ec-8614-4ed267b8db45\") " pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:53 crc kubenswrapper[4884]: I1128 16:55:53.925573 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hktq5"] Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.012268 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-zmdmx"] Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.013453 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.021595 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsmcv\" (UniqueName: \"kubernetes.io/projected/027e80a9-c0fd-47ec-8614-4ed267b8db45-kube-api-access-nsmcv\") pod \"nova-api-db-create-hktq5\" (UID: \"027e80a9-c0fd-47ec-8614-4ed267b8db45\") " pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.021741 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whzlb\" (UniqueName: \"kubernetes.io/projected/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d-kube-api-access-whzlb\") pod \"nova-cell0-db-create-zmdmx\" (UID: \"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d\") " pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.022663 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-zmdmx"] Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.050553 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsmcv\" (UniqueName: \"kubernetes.io/projected/027e80a9-c0fd-47ec-8614-4ed267b8db45-kube-api-access-nsmcv\") pod \"nova-api-db-create-hktq5\" (UID: \"027e80a9-c0fd-47ec-8614-4ed267b8db45\") " pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.118707 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-fz74w"] Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.119895 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.124055 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whzlb\" (UniqueName: \"kubernetes.io/projected/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d-kube-api-access-whzlb\") pod \"nova-cell0-db-create-zmdmx\" (UID: \"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d\") " pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.136366 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fz74w"] Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.144046 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whzlb\" (UniqueName: \"kubernetes.io/projected/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d-kube-api-access-whzlb\") pod \"nova-cell0-db-create-zmdmx\" (UID: \"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d\") " pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.225619 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj2mr\" (UniqueName: \"kubernetes.io/projected/b9f6705f-b8b0-43f7-b5d7-274032c45b93-kube-api-access-jj2mr\") pod \"nova-cell1-db-create-fz74w\" (UID: \"b9f6705f-b8b0-43f7-b5d7-274032c45b93\") " pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.287029 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.307479 4884 generic.go:334] "Generic (PLEG): container finished" podID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerID="3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77" exitCode=0 Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.307529 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mxlp" event={"ID":"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df","Type":"ContainerDied","Data":"3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77"} Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.307557 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mxlp" event={"ID":"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df","Type":"ContainerStarted","Data":"5d4a55389a2acc7fada3ac108049a1e085b3ed298678f15d83313a9db2251d2e"} Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.311990 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.328522 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj2mr\" (UniqueName: \"kubernetes.io/projected/b9f6705f-b8b0-43f7-b5d7-274032c45b93-kube-api-access-jj2mr\") pod \"nova-cell1-db-create-fz74w\" (UID: \"b9f6705f-b8b0-43f7-b5d7-274032c45b93\") " pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.330790 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.347491 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj2mr\" (UniqueName: \"kubernetes.io/projected/b9f6705f-b8b0-43f7-b5d7-274032c45b93-kube-api-access-jj2mr\") pod \"nova-cell1-db-create-fz74w\" (UID: \"b9f6705f-b8b0-43f7-b5d7-274032c45b93\") " pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.446777 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:54 crc kubenswrapper[4884]: W1128 16:55:54.798141 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod027e80a9_c0fd_47ec_8614_4ed267b8db45.slice/crio-40fd6cb14e86fbe3bb07a21f4f831cb869169c71165837a0083e7b9b52b267db WatchSource:0}: Error finding container 40fd6cb14e86fbe3bb07a21f4f831cb869169c71165837a0083e7b9b52b267db: Status 404 returned error can't find the container with id 40fd6cb14e86fbe3bb07a21f4f831cb869169c71165837a0083e7b9b52b267db Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.800267 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hktq5"] Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.877194 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-zmdmx"] Nov 28 16:55:54 crc kubenswrapper[4884]: W1128 16:55:54.896365 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8a0d755_8ab0_437f_a2c3_b1d7a1fb043d.slice/crio-41bc8b3f5f71dc67a65e87f08fa79d0d42e2844e5fc1dbe9f7154e062a606033 WatchSource:0}: Error finding container 41bc8b3f5f71dc67a65e87f08fa79d0d42e2844e5fc1dbe9f7154e062a606033: Status 404 returned error can't find the container with id 41bc8b3f5f71dc67a65e87f08fa79d0d42e2844e5fc1dbe9f7154e062a606033 Nov 28 16:55:54 crc kubenswrapper[4884]: I1128 16:55:54.979993 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fz74w"] Nov 28 16:55:54 crc kubenswrapper[4884]: W1128 16:55:54.992399 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9f6705f_b8b0_43f7_b5d7_274032c45b93.slice/crio-63b9a542f782eb4cba235ff9383b5ef72770563c16653c0f1f69e9d864b08446 WatchSource:0}: Error finding container 63b9a542f782eb4cba235ff9383b5ef72770563c16653c0f1f69e9d864b08446: Status 404 returned error can't find the container with id 63b9a542f782eb4cba235ff9383b5ef72770563c16653c0f1f69e9d864b08446 Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.324280 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fz74w" event={"ID":"b9f6705f-b8b0-43f7-b5d7-274032c45b93","Type":"ContainerStarted","Data":"63b9a542f782eb4cba235ff9383b5ef72770563c16653c0f1f69e9d864b08446"} Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.326020 4884 generic.go:334] "Generic (PLEG): container finished" podID="027e80a9-c0fd-47ec-8614-4ed267b8db45" containerID="7521e0680f6d1dcf83d7edeabcc49c102cdaa16f996225d4439c6708b94a820c" exitCode=0 Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.326116 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hktq5" event={"ID":"027e80a9-c0fd-47ec-8614-4ed267b8db45","Type":"ContainerDied","Data":"7521e0680f6d1dcf83d7edeabcc49c102cdaa16f996225d4439c6708b94a820c"} Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.326182 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hktq5" event={"ID":"027e80a9-c0fd-47ec-8614-4ed267b8db45","Type":"ContainerStarted","Data":"40fd6cb14e86fbe3bb07a21f4f831cb869169c71165837a0083e7b9b52b267db"} Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.327883 4884 generic.go:334] "Generic (PLEG): container finished" podID="d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d" 
containerID="b8bf57a6c80c5839eb6d7ec044fe5d59725f8cafe2b62160c0e19a6bf12b6e89" exitCode=0 Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.327924 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-zmdmx" event={"ID":"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d","Type":"ContainerDied","Data":"b8bf57a6c80c5839eb6d7ec044fe5d59725f8cafe2b62160c0e19a6bf12b6e89"} Nov 28 16:55:55 crc kubenswrapper[4884]: I1128 16:55:55.327945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-zmdmx" event={"ID":"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d","Type":"ContainerStarted","Data":"41bc8b3f5f71dc67a65e87f08fa79d0d42e2844e5fc1dbe9f7154e062a606033"} Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.339954 4884 generic.go:334] "Generic (PLEG): container finished" podID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerID="a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08" exitCode=0 Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.340014 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mxlp" event={"ID":"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df","Type":"ContainerDied","Data":"a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08"} Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.343904 4884 generic.go:334] "Generic (PLEG): container finished" podID="b9f6705f-b8b0-43f7-b5d7-274032c45b93" containerID="221ee9f0a947f73eaad0856d220a679b5b52e19cbfd3c2601c0e97f7f984c24b" exitCode=0 Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.344011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fz74w" event={"ID":"b9f6705f-b8b0-43f7-b5d7-274032c45b93","Type":"ContainerDied","Data":"221ee9f0a947f73eaad0856d220a679b5b52e19cbfd3c2601c0e97f7f984c24b"} Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.766152 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.774482 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.890201 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whzlb\" (UniqueName: \"kubernetes.io/projected/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d-kube-api-access-whzlb\") pod \"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d\" (UID: \"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d\") " Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.890506 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsmcv\" (UniqueName: \"kubernetes.io/projected/027e80a9-c0fd-47ec-8614-4ed267b8db45-kube-api-access-nsmcv\") pod \"027e80a9-c0fd-47ec-8614-4ed267b8db45\" (UID: \"027e80a9-c0fd-47ec-8614-4ed267b8db45\") " Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.896802 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d-kube-api-access-whzlb" (OuterVolumeSpecName: "kube-api-access-whzlb") pod "d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d" (UID: "d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d"). InnerVolumeSpecName "kube-api-access-whzlb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.897400 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/027e80a9-c0fd-47ec-8614-4ed267b8db45-kube-api-access-nsmcv" (OuterVolumeSpecName: "kube-api-access-nsmcv") pod "027e80a9-c0fd-47ec-8614-4ed267b8db45" (UID: "027e80a9-c0fd-47ec-8614-4ed267b8db45"). InnerVolumeSpecName "kube-api-access-nsmcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.992598 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsmcv\" (UniqueName: \"kubernetes.io/projected/027e80a9-c0fd-47ec-8614-4ed267b8db45-kube-api-access-nsmcv\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:56 crc kubenswrapper[4884]: I1128 16:55:56.992650 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whzlb\" (UniqueName: \"kubernetes.io/projected/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d-kube-api-access-whzlb\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.356459 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hktq5" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.356523 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hktq5" event={"ID":"027e80a9-c0fd-47ec-8614-4ed267b8db45","Type":"ContainerDied","Data":"40fd6cb14e86fbe3bb07a21f4f831cb869169c71165837a0083e7b9b52b267db"} Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.356597 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40fd6cb14e86fbe3bb07a21f4f831cb869169c71165837a0083e7b9b52b267db" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.358757 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-zmdmx" event={"ID":"d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d","Type":"ContainerDied","Data":"41bc8b3f5f71dc67a65e87f08fa79d0d42e2844e5fc1dbe9f7154e062a606033"} Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.358872 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41bc8b3f5f71dc67a65e87f08fa79d0d42e2844e5fc1dbe9f7154e062a606033" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.358753 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-zmdmx" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.607647 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.704645 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jj2mr\" (UniqueName: \"kubernetes.io/projected/b9f6705f-b8b0-43f7-b5d7-274032c45b93-kube-api-access-jj2mr\") pod \"b9f6705f-b8b0-43f7-b5d7-274032c45b93\" (UID: \"b9f6705f-b8b0-43f7-b5d7-274032c45b93\") " Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.708986 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9f6705f-b8b0-43f7-b5d7-274032c45b93-kube-api-access-jj2mr" (OuterVolumeSpecName: "kube-api-access-jj2mr") pod "b9f6705f-b8b0-43f7-b5d7-274032c45b93" (UID: "b9f6705f-b8b0-43f7-b5d7-274032c45b93"). InnerVolumeSpecName "kube-api-access-jj2mr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:55:57 crc kubenswrapper[4884]: I1128 16:55:57.806558 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jj2mr\" (UniqueName: \"kubernetes.io/projected/b9f6705f-b8b0-43f7-b5d7-274032c45b93-kube-api-access-jj2mr\") on node \"crc\" DevicePath \"\"" Nov 28 16:55:58 crc kubenswrapper[4884]: I1128 16:55:58.370616 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fz74w" Nov 28 16:55:58 crc kubenswrapper[4884]: I1128 16:55:58.370624 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fz74w" event={"ID":"b9f6705f-b8b0-43f7-b5d7-274032c45b93","Type":"ContainerDied","Data":"63b9a542f782eb4cba235ff9383b5ef72770563c16653c0f1f69e9d864b08446"} Nov 28 16:55:58 crc kubenswrapper[4884]: I1128 16:55:58.370764 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63b9a542f782eb4cba235ff9383b5ef72770563c16653c0f1f69e9d864b08446" Nov 28 16:55:58 crc kubenswrapper[4884]: I1128 16:55:58.373677 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mxlp" event={"ID":"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df","Type":"ContainerStarted","Data":"f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e"} Nov 28 16:55:58 crc kubenswrapper[4884]: I1128 16:55:58.401820 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5mxlp" podStartSLOduration=3.487102679 podStartE2EDuration="6.401799414s" podCreationTimestamp="2025-11-28 16:55:52 +0000 UTC" firstStartedPulling="2025-11-28 16:55:54.311695465 +0000 UTC m=+5793.874479266" lastFinishedPulling="2025-11-28 16:55:57.2263922 +0000 UTC m=+5796.789176001" observedRunningTime="2025-11-28 16:55:58.394075945 +0000 UTC m=+5797.956859766" watchObservedRunningTime="2025-11-28 16:55:58.401799414 +0000 UTC m=+5797.964583235" Nov 28 16:56:03 crc kubenswrapper[4884]: I1128 16:56:03.099534 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:56:03 crc kubenswrapper[4884]: I1128 16:56:03.100008 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:56:03 crc kubenswrapper[4884]: I1128 16:56:03.151742 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:56:03 crc kubenswrapper[4884]: I1128 16:56:03.472715 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:56:03 crc kubenswrapper[4884]: I1128 16:56:03.535890 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5mxlp"] Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.156293 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2078-account-create-8xlqx"] Nov 28 16:56:04 crc kubenswrapper[4884]: E1128 16:56:04.156733 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="027e80a9-c0fd-47ec-8614-4ed267b8db45" containerName="mariadb-database-create" Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.156748 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="027e80a9-c0fd-47ec-8614-4ed267b8db45" containerName="mariadb-database-create" Nov 28 16:56:04 crc 
Nov 28 16:56:04 crc kubenswrapper[4884]: E1128 16:56:04.156778 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9f6705f-b8b0-43f7-b5d7-274032c45b93" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.156784 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9f6705f-b8b0-43f7-b5d7-274032c45b93" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: E1128 16:56:04.156802 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.156809 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.157005 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="027e80a9-c0fd-47ec-8614-4ed267b8db45" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.157033 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9f6705f-b8b0-43f7-b5d7-274032c45b93" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.157048 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d" containerName="mariadb-database-create"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.157873 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2078-account-create-8xlqx"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.167592 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.190834 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmx7f\" (UniqueName: \"kubernetes.io/projected/15e19289-bb36-40c5-abca-c9707c889094-kube-api-access-bmx7f\") pod \"nova-api-2078-account-create-8xlqx\" (UID: \"15e19289-bb36-40c5-abca-c9707c889094\") " pod="openstack/nova-api-2078-account-create-8xlqx"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.197370 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2078-account-create-8xlqx"]
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.292781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmx7f\" (UniqueName: \"kubernetes.io/projected/15e19289-bb36-40c5-abca-c9707c889094-kube-api-access-bmx7f\") pod \"nova-api-2078-account-create-8xlqx\" (UID: \"15e19289-bb36-40c5-abca-c9707c889094\") " pod="openstack/nova-api-2078-account-create-8xlqx"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.315588 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmx7f\" (UniqueName: \"kubernetes.io/projected/15e19289-bb36-40c5-abca-c9707c889094-kube-api-access-bmx7f\") pod \"nova-api-2078-account-create-8xlqx\" (UID: \"15e19289-bb36-40c5-abca-c9707c889094\") " pod="openstack/nova-api-2078-account-create-8xlqx"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.349720 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-5b90-account-create-fzm75"]
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.351208 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-5b90-account-create-fzm75"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.353640 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.378275 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-5b90-account-create-fzm75"]
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.491069 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2078-account-create-8xlqx"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.496101 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ftmr\" (UniqueName: \"kubernetes.io/projected/3b46b792-bae5-4ec1-b696-3219b34136f2-kube-api-access-8ftmr\") pod \"nova-cell0-5b90-account-create-fzm75\" (UID: \"3b46b792-bae5-4ec1-b696-3219b34136f2\") " pod="openstack/nova-cell0-5b90-account-create-fzm75"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.556133 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-fb5d-account-create-plzj8"]
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.558606 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-fb5d-account-create-plzj8"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.562786 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.563490 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-fb5d-account-create-plzj8"]
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.597212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ftmr\" (UniqueName: \"kubernetes.io/projected/3b46b792-bae5-4ec1-b696-3219b34136f2-kube-api-access-8ftmr\") pod \"nova-cell0-5b90-account-create-fzm75\" (UID: \"3b46b792-bae5-4ec1-b696-3219b34136f2\") " pod="openstack/nova-cell0-5b90-account-create-fzm75"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.616682 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ftmr\" (UniqueName: \"kubernetes.io/projected/3b46b792-bae5-4ec1-b696-3219b34136f2-kube-api-access-8ftmr\") pod \"nova-cell0-5b90-account-create-fzm75\" (UID: \"3b46b792-bae5-4ec1-b696-3219b34136f2\") " pod="openstack/nova-cell0-5b90-account-create-fzm75"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.684286 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-5b90-account-create-fzm75"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.690534 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:56:04 crc kubenswrapper[4884]: E1128 16:56:04.690900 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.698820 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4xhp\" (UniqueName: \"kubernetes.io/projected/ee34c725-d8dc-4f85-a494-7d35ddfb9b54-kube-api-access-f4xhp\") pod \"nova-cell1-fb5d-account-create-plzj8\" (UID: \"ee34c725-d8dc-4f85-a494-7d35ddfb9b54\") " pod="openstack/nova-cell1-fb5d-account-create-plzj8"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.800246 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4xhp\" (UniqueName: \"kubernetes.io/projected/ee34c725-d8dc-4f85-a494-7d35ddfb9b54-kube-api-access-f4xhp\") pod \"nova-cell1-fb5d-account-create-plzj8\" (UID: \"ee34c725-d8dc-4f85-a494-7d35ddfb9b54\") " pod="openstack/nova-cell1-fb5d-account-create-plzj8"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.823201 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4xhp\" (UniqueName: \"kubernetes.io/projected/ee34c725-d8dc-4f85-a494-7d35ddfb9b54-kube-api-access-f4xhp\") pod \"nova-cell1-fb5d-account-create-plzj8\" (UID: \"ee34c725-d8dc-4f85-a494-7d35ddfb9b54\") " pod="openstack/nova-cell1-fb5d-account-create-plzj8"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.960649 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-fb5d-account-create-plzj8"
Nov 28 16:56:04 crc kubenswrapper[4884]: I1128 16:56:04.981174 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2078-account-create-8xlqx"]
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.103047 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-5b90-account-create-fzm75"]
Nov 28 16:56:05 crc kubenswrapper[4884]: W1128 16:56:05.113061 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3b46b792_bae5_4ec1_b696_3219b34136f2.slice/crio-c8e32f17bdc1a2806ec737e9ff3549d375e2f87596200495dce85284d4bd410d WatchSource:0}: Error finding container c8e32f17bdc1a2806ec737e9ff3549d375e2f87596200495dce85284d4bd410d: Status 404 returned error can't find the container with id c8e32f17bdc1a2806ec737e9ff3549d375e2f87596200495dce85284d4bd410d
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.394765 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-fb5d-account-create-plzj8"]
Nov 28 16:56:05 crc kubenswrapper[4884]: W1128 16:56:05.430205 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee34c725_d8dc_4f85_a494_7d35ddfb9b54.slice/crio-49fbf25da2ff72c93eda57fd4cdf520d2d74add93e3ef441c9c49d188a8bdfc2 WatchSource:0}: Error finding container 49fbf25da2ff72c93eda57fd4cdf520d2d74add93e3ef441c9c49d188a8bdfc2: Status 404 returned error can't find the container with id 49fbf25da2ff72c93eda57fd4cdf520d2d74add93e3ef441c9c49d188a8bdfc2
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.450470 4884 generic.go:334] "Generic (PLEG): container finished" podID="3b46b792-bae5-4ec1-b696-3219b34136f2" containerID="18ddab20017ff8adaf8fb3133e37bceae9a0b075618a6328ea97838c2e7dfae2" exitCode=0
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.450511 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-5b90-account-create-fzm75" event={"ID":"3b46b792-bae5-4ec1-b696-3219b34136f2","Type":"ContainerDied","Data":"18ddab20017ff8adaf8fb3133e37bceae9a0b075618a6328ea97838c2e7dfae2"}
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.450555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-5b90-account-create-fzm75" event={"ID":"3b46b792-bae5-4ec1-b696-3219b34136f2","Type":"ContainerStarted","Data":"c8e32f17bdc1a2806ec737e9ff3549d375e2f87596200495dce85284d4bd410d"}
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.452432 4884 generic.go:334] "Generic (PLEG): container finished" podID="15e19289-bb36-40c5-abca-c9707c889094" containerID="62a6840da3b8a531145a1c13670e60c24403673ce0a33b753f55ef189a8fd612" exitCode=0
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.452528 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2078-account-create-8xlqx" event={"ID":"15e19289-bb36-40c5-abca-c9707c889094","Type":"ContainerDied","Data":"62a6840da3b8a531145a1c13670e60c24403673ce0a33b753f55ef189a8fd612"}
Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.452576 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2078-account-create-8xlqx" event={"ID":"15e19289-bb36-40c5-abca-c9707c889094","Type":"ContainerStarted","Data":"06364090b74b21fc7fe1e9a9c02f81b2a3d22efbcc0b7aafba47cba06ad4c421"}
"SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-fb5d-account-create-plzj8" event={"ID":"ee34c725-d8dc-4f85-a494-7d35ddfb9b54","Type":"ContainerStarted","Data":"49fbf25da2ff72c93eda57fd4cdf520d2d74add93e3ef441c9c49d188a8bdfc2"} Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.456353 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5mxlp" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="registry-server" containerID="cri-o://f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e" gracePeriod=2 Nov 28 16:56:05 crc kubenswrapper[4884]: I1128 16:56:05.881271 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.019652 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-utilities\") pod \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.019731 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-catalog-content\") pod \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.019785 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9vxh\" (UniqueName: \"kubernetes.io/projected/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-kube-api-access-j9vxh\") pod \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\" (UID: \"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df\") " Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.020818 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-utilities" (OuterVolumeSpecName: "utilities") pod "8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" (UID: "8a3eaf5a-b721-46ce-aa21-1c0db86ea2df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.026411 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-kube-api-access-j9vxh" (OuterVolumeSpecName: "kube-api-access-j9vxh") pod "8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" (UID: "8a3eaf5a-b721-46ce-aa21-1c0db86ea2df"). InnerVolumeSpecName "kube-api-access-j9vxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.071915 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" (UID: "8a3eaf5a-b721-46ce-aa21-1c0db86ea2df"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.122045 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.122241 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.122254 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9vxh\" (UniqueName: \"kubernetes.io/projected/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df-kube-api-access-j9vxh\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.471440 4884 generic.go:334] "Generic (PLEG): container finished" podID="ee34c725-d8dc-4f85-a494-7d35ddfb9b54" containerID="4a024e7059adf7516f6ced612dd32290706d012f1173169999cc33bc32753746" exitCode=0 Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.471536 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-fb5d-account-create-plzj8" event={"ID":"ee34c725-d8dc-4f85-a494-7d35ddfb9b54","Type":"ContainerDied","Data":"4a024e7059adf7516f6ced612dd32290706d012f1173169999cc33bc32753746"} Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.474619 4884 generic.go:334] "Generic (PLEG): container finished" podID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerID="f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e" exitCode=0 Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.475033 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5mxlp" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.475342 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mxlp" event={"ID":"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df","Type":"ContainerDied","Data":"f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e"} Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.475489 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5mxlp" event={"ID":"8a3eaf5a-b721-46ce-aa21-1c0db86ea2df","Type":"ContainerDied","Data":"5d4a55389a2acc7fada3ac108049a1e085b3ed298678f15d83313a9db2251d2e"} Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.475658 4884 scope.go:117] "RemoveContainer" containerID="f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.504909 4884 scope.go:117] "RemoveContainer" containerID="a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.527638 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5mxlp"] Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.534770 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5mxlp"] Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.550406 4884 scope.go:117] "RemoveContainer" containerID="3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.571902 4884 scope.go:117] "RemoveContainer" containerID="f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e" Nov 28 16:56:06 crc kubenswrapper[4884]: E1128 16:56:06.572271 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e\": container with ID starting with f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e not found: ID does not exist" containerID="f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.572300 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e"} err="failed to get container status \"f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e\": rpc error: code = NotFound desc = could not find container \"f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e\": container with ID starting with f8c167699e7d2f35e03c6a7d931ac142576685e39aff496c3cce3261c217d71e not found: ID does not exist" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.572321 4884 scope.go:117] "RemoveContainer" containerID="a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08" Nov 28 16:56:06 crc kubenswrapper[4884]: E1128 16:56:06.572843 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08\": container with ID starting with a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08 not found: ID does not exist" containerID="a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.572862 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08"} err="failed to get container status \"a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08\": rpc error: code = NotFound desc = could not find container \"a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08\": container with ID starting with a81d61be259dac4509e0516d81d9dd67948601db3bf3acbb9ae602e3b57d7e08 not found: ID does not exist" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.572876 4884 scope.go:117] "RemoveContainer" containerID="3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77" Nov 28 16:56:06 crc kubenswrapper[4884]: E1128 16:56:06.573252 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77\": container with ID starting with 3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77 not found: ID does not exist" containerID="3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.573306 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77"} err="failed to get container status \"3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77\": rpc error: code = NotFound desc = could not find container \"3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77\": container with ID starting with 3f7f7c4a35abd26fc38fc0309c5eb47160ad30485b58d24a6df6d8a7ba830f77 not found: ID does not exist" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.707317 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" path="/var/lib/kubelet/pods/8a3eaf5a-b721-46ce-aa21-1c0db86ea2df/volumes" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.849190 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2078-account-create-8xlqx" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.858821 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-5b90-account-create-fzm75" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.937767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmx7f\" (UniqueName: \"kubernetes.io/projected/15e19289-bb36-40c5-abca-c9707c889094-kube-api-access-bmx7f\") pod \"15e19289-bb36-40c5-abca-c9707c889094\" (UID: \"15e19289-bb36-40c5-abca-c9707c889094\") " Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.937835 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ftmr\" (UniqueName: \"kubernetes.io/projected/3b46b792-bae5-4ec1-b696-3219b34136f2-kube-api-access-8ftmr\") pod \"3b46b792-bae5-4ec1-b696-3219b34136f2\" (UID: \"3b46b792-bae5-4ec1-b696-3219b34136f2\") " Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.941175 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e19289-bb36-40c5-abca-c9707c889094-kube-api-access-bmx7f" (OuterVolumeSpecName: "kube-api-access-bmx7f") pod "15e19289-bb36-40c5-abca-c9707c889094" (UID: "15e19289-bb36-40c5-abca-c9707c889094"). InnerVolumeSpecName "kube-api-access-bmx7f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:06 crc kubenswrapper[4884]: I1128 16:56:06.941237 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b46b792-bae5-4ec1-b696-3219b34136f2-kube-api-access-8ftmr" (OuterVolumeSpecName: "kube-api-access-8ftmr") pod "3b46b792-bae5-4ec1-b696-3219b34136f2" (UID: "3b46b792-bae5-4ec1-b696-3219b34136f2"). InnerVolumeSpecName "kube-api-access-8ftmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.040306 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmx7f\" (UniqueName: \"kubernetes.io/projected/15e19289-bb36-40c5-abca-c9707c889094-kube-api-access-bmx7f\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.040448 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ftmr\" (UniqueName: \"kubernetes.io/projected/3b46b792-bae5-4ec1-b696-3219b34136f2-kube-api-access-8ftmr\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.488472 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-5b90-account-create-fzm75" event={"ID":"3b46b792-bae5-4ec1-b696-3219b34136f2","Type":"ContainerDied","Data":"c8e32f17bdc1a2806ec737e9ff3549d375e2f87596200495dce85284d4bd410d"} Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.488518 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8e32f17bdc1a2806ec737e9ff3549d375e2f87596200495dce85284d4bd410d" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.488522 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-5b90-account-create-fzm75" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.490665 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2078-account-create-8xlqx" event={"ID":"15e19289-bb36-40c5-abca-c9707c889094","Type":"ContainerDied","Data":"06364090b74b21fc7fe1e9a9c02f81b2a3d22efbcc0b7aafba47cba06ad4c421"} Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.490691 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2078-account-create-8xlqx" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.490714 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="06364090b74b21fc7fe1e9a9c02f81b2a3d22efbcc0b7aafba47cba06ad4c421" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.801147 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-fb5d-account-create-plzj8" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.855587 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4xhp\" (UniqueName: \"kubernetes.io/projected/ee34c725-d8dc-4f85-a494-7d35ddfb9b54-kube-api-access-f4xhp\") pod \"ee34c725-d8dc-4f85-a494-7d35ddfb9b54\" (UID: \"ee34c725-d8dc-4f85-a494-7d35ddfb9b54\") " Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.860456 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee34c725-d8dc-4f85-a494-7d35ddfb9b54-kube-api-access-f4xhp" (OuterVolumeSpecName: "kube-api-access-f4xhp") pod "ee34c725-d8dc-4f85-a494-7d35ddfb9b54" (UID: "ee34c725-d8dc-4f85-a494-7d35ddfb9b54"). InnerVolumeSpecName "kube-api-access-f4xhp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:07 crc kubenswrapper[4884]: I1128 16:56:07.958236 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4xhp\" (UniqueName: \"kubernetes.io/projected/ee34c725-d8dc-4f85-a494-7d35ddfb9b54-kube-api-access-f4xhp\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:08 crc kubenswrapper[4884]: I1128 16:56:08.501558 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-fb5d-account-create-plzj8" event={"ID":"ee34c725-d8dc-4f85-a494-7d35ddfb9b54","Type":"ContainerDied","Data":"49fbf25da2ff72c93eda57fd4cdf520d2d74add93e3ef441c9c49d188a8bdfc2"} Nov 28 16:56:08 crc kubenswrapper[4884]: I1128 16:56:08.501599 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49fbf25da2ff72c93eda57fd4cdf520d2d74add93e3ef441c9c49d188a8bdfc2" Nov 28 16:56:08 crc kubenswrapper[4884]: I1128 16:56:08.501647 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-fb5d-account-create-plzj8" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.531697 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fvzhr"] Nov 28 16:56:09 crc kubenswrapper[4884]: E1128 16:56:09.532033 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b46b792-bae5-4ec1-b696-3219b34136f2" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532045 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b46b792-bae5-4ec1-b696-3219b34136f2" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: E1128 16:56:09.532053 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="extract-content" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532059 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="extract-content" Nov 28 16:56:09 crc kubenswrapper[4884]: E1128 16:56:09.532085 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="extract-utilities" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532106 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="extract-utilities" Nov 28 16:56:09 crc kubenswrapper[4884]: E1128 16:56:09.532117 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee34c725-d8dc-4f85-a494-7d35ddfb9b54" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532122 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee34c725-d8dc-4f85-a494-7d35ddfb9b54" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: E1128 16:56:09.532131 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="registry-server" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532137 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="registry-server" Nov 28 16:56:09 crc kubenswrapper[4884]: E1128 16:56:09.532155 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e19289-bb36-40c5-abca-c9707c889094" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532161 4884 
state_mem.go:107] "Deleted CPUSet assignment" podUID="15e19289-bb36-40c5-abca-c9707c889094" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532303 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a3eaf5a-b721-46ce-aa21-1c0db86ea2df" containerName="registry-server" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532327 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee34c725-d8dc-4f85-a494-7d35ddfb9b54" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532342 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b46b792-bae5-4ec1-b696-3219b34136f2" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532355 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e19289-bb36-40c5-abca-c9707c889094" containerName="mariadb-account-create" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.532901 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.535771 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.541979 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fvzhr"] Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.544853 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.545057 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8zvdj" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.588648 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnvkx\" (UniqueName: \"kubernetes.io/projected/71c60737-59f8-414b-857d-bea11d085a23-kube-api-access-jnvkx\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.588696 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-config-data\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.588992 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.589075 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-scripts\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc 
kubenswrapper[4884]: I1128 16:56:09.690749 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnvkx\" (UniqueName: \"kubernetes.io/projected/71c60737-59f8-414b-857d-bea11d085a23-kube-api-access-jnvkx\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.690815 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-config-data\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.690886 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.690916 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-scripts\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.696358 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-scripts\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.696545 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.697705 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-config-data\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.709161 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnvkx\" (UniqueName: \"kubernetes.io/projected/71c60737-59f8-414b-857d-bea11d085a23-kube-api-access-jnvkx\") pod \"nova-cell0-conductor-db-sync-fvzhr\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:09 crc kubenswrapper[4884]: I1128 16:56:09.850001 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:10 crc kubenswrapper[4884]: I1128 16:56:10.297867 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fvzhr"] Nov 28 16:56:10 crc kubenswrapper[4884]: I1128 16:56:10.517453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" event={"ID":"71c60737-59f8-414b-857d-bea11d085a23","Type":"ContainerStarted","Data":"54128fa3596a8e2e427db3cf08f35ef26934ca8d1779fcb9c8f3f779664f336c"} Nov 28 16:56:11 crc kubenswrapper[4884]: I1128 16:56:11.530434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" event={"ID":"71c60737-59f8-414b-857d-bea11d085a23","Type":"ContainerStarted","Data":"eb9bf7b638943e5d4598e6b4d7c59fe1e44e624179d0964178190014d9c9889f"} Nov 28 16:56:11 crc kubenswrapper[4884]: I1128 16:56:11.553508 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" podStartSLOduration=2.553490761 podStartE2EDuration="2.553490761s" podCreationTimestamp="2025-11-28 16:56:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:11.551714268 +0000 UTC m=+5811.114498099" watchObservedRunningTime="2025-11-28 16:56:11.553490761 +0000 UTC m=+5811.116274562" Nov 28 16:56:15 crc kubenswrapper[4884]: I1128 16:56:15.567392 4884 generic.go:334] "Generic (PLEG): container finished" podID="71c60737-59f8-414b-857d-bea11d085a23" containerID="eb9bf7b638943e5d4598e6b4d7c59fe1e44e624179d0964178190014d9c9889f" exitCode=0 Nov 28 16:56:15 crc kubenswrapper[4884]: I1128 16:56:15.567540 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" event={"ID":"71c60737-59f8-414b-857d-bea11d085a23","Type":"ContainerDied","Data":"eb9bf7b638943e5d4598e6b4d7c59fe1e44e624179d0964178190014d9c9889f"} Nov 28 16:56:16 crc kubenswrapper[4884]: I1128 16:56:16.903059 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.045653 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-config-data\") pod \"71c60737-59f8-414b-857d-bea11d085a23\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.045896 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnvkx\" (UniqueName: \"kubernetes.io/projected/71c60737-59f8-414b-857d-bea11d085a23-kube-api-access-jnvkx\") pod \"71c60737-59f8-414b-857d-bea11d085a23\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.046621 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-scripts\") pod \"71c60737-59f8-414b-857d-bea11d085a23\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.046677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-combined-ca-bundle\") pod \"71c60737-59f8-414b-857d-bea11d085a23\" (UID: \"71c60737-59f8-414b-857d-bea11d085a23\") " Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.052025 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-scripts" (OuterVolumeSpecName: "scripts") pod "71c60737-59f8-414b-857d-bea11d085a23" (UID: "71c60737-59f8-414b-857d-bea11d085a23"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.052131 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71c60737-59f8-414b-857d-bea11d085a23-kube-api-access-jnvkx" (OuterVolumeSpecName: "kube-api-access-jnvkx") pod "71c60737-59f8-414b-857d-bea11d085a23" (UID: "71c60737-59f8-414b-857d-bea11d085a23"). InnerVolumeSpecName "kube-api-access-jnvkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.072806 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-config-data" (OuterVolumeSpecName: "config-data") pod "71c60737-59f8-414b-857d-bea11d085a23" (UID: "71c60737-59f8-414b-857d-bea11d085a23"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.081270 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "71c60737-59f8-414b-857d-bea11d085a23" (UID: "71c60737-59f8-414b-857d-bea11d085a23"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.148957 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnvkx\" (UniqueName: \"kubernetes.io/projected/71c60737-59f8-414b-857d-bea11d085a23-kube-api-access-jnvkx\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.148983 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.148994 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.149004 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71c60737-59f8-414b-857d-bea11d085a23-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.593141 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" event={"ID":"71c60737-59f8-414b-857d-bea11d085a23","Type":"ContainerDied","Data":"54128fa3596a8e2e427db3cf08f35ef26934ca8d1779fcb9c8f3f779664f336c"} Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.593193 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54128fa3596a8e2e427db3cf08f35ef26934ca8d1779fcb9c8f3f779664f336c" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.593261 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fvzhr" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.671024 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:56:17 crc kubenswrapper[4884]: E1128 16:56:17.671576 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71c60737-59f8-414b-857d-bea11d085a23" containerName="nova-cell0-conductor-db-sync" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.671600 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="71c60737-59f8-414b-857d-bea11d085a23" containerName="nova-cell0-conductor-db-sync" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.671840 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="71c60737-59f8-414b-857d-bea11d085a23" containerName="nova-cell0-conductor-db-sync" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.672618 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.675972 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.676554 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8zvdj" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.683209 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.759179 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.759225 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.759243 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n86k7\" (UniqueName: \"kubernetes.io/projected/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-kube-api-access-n86k7\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.860772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.860808 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n86k7\" (UniqueName: \"kubernetes.io/projected/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-kube-api-access-n86k7\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.860944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.864569 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.867361 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.878332 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n86k7\" (UniqueName: \"kubernetes.io/projected/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-kube-api-access-n86k7\") pod \"nova-cell0-conductor-0\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:17 crc kubenswrapper[4884]: I1128 16:56:17.995935 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:18 crc kubenswrapper[4884]: I1128 16:56:18.474775 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:56:18 crc kubenswrapper[4884]: W1128 16:56:18.486409 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a85ed4d_ec4b_4ac1_b5b6_0abc95f97d7a.slice/crio-947a1c2d3e795cc7f048ce530666517e7df7faf7b61acbe38416992695b89239 WatchSource:0}: Error finding container 947a1c2d3e795cc7f048ce530666517e7df7faf7b61acbe38416992695b89239: Status 404 returned error can't find the container with id 947a1c2d3e795cc7f048ce530666517e7df7faf7b61acbe38416992695b89239 Nov 28 16:56:18 crc kubenswrapper[4884]: I1128 16:56:18.608000 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a","Type":"ContainerStarted","Data":"947a1c2d3e795cc7f048ce530666517e7df7faf7b61acbe38416992695b89239"} Nov 28 16:56:18 crc kubenswrapper[4884]: I1128 16:56:18.689763 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:56:18 crc kubenswrapper[4884]: E1128 16:56:18.690310 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:56:19 crc kubenswrapper[4884]: I1128 16:56:19.620906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a","Type":"ContainerStarted","Data":"5a9d45c2f5bf198e2fd564802cd4d60507f8ab22f928d7f0d518b7902144efc4"} Nov 28 16:56:19 crc kubenswrapper[4884]: I1128 16:56:19.622047 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 16:56:19 crc kubenswrapper[4884]: I1128 16:56:19.645879 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.645862471 podStartE2EDuration="2.645862471s" podCreationTimestamp="2025-11-28 16:56:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:19.642226342 +0000 UTC m=+5819.205010143" watchObservedRunningTime="2025-11-28 16:56:19.645862471 +0000 UTC m=+5819.208646272" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.031286 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 
28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.491312 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-kqdzv"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.498452 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.500324 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kqdzv"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.501314 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.502465 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.556330 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-config-data\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.556420 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.556478 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-scripts\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.556505 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvcp7\" (UniqueName: \"kubernetes.io/projected/06cbddb0-9fd4-44dc-b931-ed32606c010d-kube-api-access-mvcp7\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.569943 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.571468 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.579136 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.584029 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659604 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-scripts\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659649 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvcp7\" (UniqueName: \"kubernetes.io/projected/06cbddb0-9fd4-44dc-b931-ed32606c010d-kube-api-access-mvcp7\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659684 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659714 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bg8r\" (UniqueName: \"kubernetes.io/projected/45b5578b-5680-45af-b7e4-30860a0fa321-kube-api-access-6bg8r\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659790 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-config-data\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659808 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-config-data\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.659844 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.673644 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-scripts\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.677478 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-config-data\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.692675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.707360 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvcp7\" (UniqueName: \"kubernetes.io/projected/06cbddb0-9fd4-44dc-b931-ed32606c010d-kube-api-access-mvcp7\") pod \"nova-cell0-cell-mapping-kqdzv\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.742857 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.760494 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.760624 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.762289 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bg8r\" (UniqueName: \"kubernetes.io/projected/45b5578b-5680-45af-b7e4-30860a0fa321-kube-api-access-6bg8r\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.762397 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-config-data\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.762486 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.773030 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-config-data\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.775891 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.779240 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.780434 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.781702 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.790694 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.805156 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bg8r\" (UniqueName: \"kubernetes.io/projected/45b5578b-5680-45af-b7e4-30860a0fa321-kube-api-access-6bg8r\") pod \"nova-scheduler-0\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.823146 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.827819 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866135 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-config-data\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9768451-9dcc-446f-9811-11d5a849ddfb-logs\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866208 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86rdz\" (UniqueName: \"kubernetes.io/projected/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-kube-api-access-86rdz\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866243 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsh7q\" (UniqueName: \"kubernetes.io/projected/d9768451-9dcc-446f-9811-11d5a849ddfb-kube-api-access-qsh7q\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866294 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866323 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " 
pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.866343 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.902738 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.904776 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.909911 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.928216 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.938415 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b56bc7bfc-v7njx"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.940445 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.941844 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.962137 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b56bc7bfc-v7njx"] Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.967803 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.967879 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-dns-svc\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.972739 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.972794 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.972812 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-config\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: 
\"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.972872 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-config-data\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973007 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973078 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-config-data\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9twxh\" (UniqueName: \"kubernetes.io/projected/aa810bba-2b91-426f-8ab9-6ce129794e16-kube-api-access-9twxh\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973153 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9768451-9dcc-446f-9811-11d5a849ddfb-logs\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973173 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-nb\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973208 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-sb\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86rdz\" (UniqueName: \"kubernetes.io/projected/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-kube-api-access-86rdz\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973258 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slmbq\" (UniqueName: \"kubernetes.io/projected/423a0680-d4b1-476a-9c01-a3d86f40948d-kube-api-access-slmbq\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 
16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423a0680-d4b1-476a-9c01-a3d86f40948d-logs\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.973321 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsh7q\" (UniqueName: \"kubernetes.io/projected/d9768451-9dcc-446f-9811-11d5a849ddfb-kube-api-access-qsh7q\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.979104 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.979152 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.980559 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.986478 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-config-data\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.990417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9768451-9dcc-446f-9811-11d5a849ddfb-logs\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.995173 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsh7q\" (UniqueName: \"kubernetes.io/projected/d9768451-9dcc-446f-9811-11d5a849ddfb-kube-api-access-qsh7q\") pod \"nova-metadata-0\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " pod="openstack/nova-metadata-0" Nov 28 16:56:28 crc kubenswrapper[4884]: I1128 16:56:28.998577 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86rdz\" (UniqueName: \"kubernetes.io/projected/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-kube-api-access-86rdz\") pod \"nova-cell1-novncproxy-0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075171 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-dns-svc\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: 
\"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075217 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-config\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-config-data\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075271 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9twxh\" (UniqueName: \"kubernetes.io/projected/aa810bba-2b91-426f-8ab9-6ce129794e16-kube-api-access-9twxh\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075331 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-nb\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075356 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-sb\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075380 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slmbq\" (UniqueName: \"kubernetes.io/projected/423a0680-d4b1-476a-9c01-a3d86f40948d-kube-api-access-slmbq\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075407 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423a0680-d4b1-476a-9c01-a3d86f40948d-logs\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.075814 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423a0680-d4b1-476a-9c01-a3d86f40948d-logs\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.077709 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-dns-svc\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.078864 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-config\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.080329 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-sb\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.082650 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-config-data\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.082797 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-nb\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.086035 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.099702 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slmbq\" (UniqueName: \"kubernetes.io/projected/423a0680-d4b1-476a-9c01-a3d86f40948d-kube-api-access-slmbq\") pod \"nova-api-0\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.104735 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9twxh\" (UniqueName: \"kubernetes.io/projected/aa810bba-2b91-426f-8ab9-6ce129794e16-kube-api-access-9twxh\") pod \"dnsmasq-dns-5b56bc7bfc-v7njx\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.178681 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.204251 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.266937 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.278953 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.445722 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kqdzv"] Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.568279 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:29 crc kubenswrapper[4884]: W1128 16:56:29.571874 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45b5578b_5680_45af_b7e4_30860a0fa321.slice/crio-6b5cc785fc3d58dc0d0521d6c40b0166cc37daa2aabd72506df138d468ca967a WatchSource:0}: Error finding container 6b5cc785fc3d58dc0d0521d6c40b0166cc37daa2aabd72506df138d468ca967a: Status 404 returned error can't find the container with id 6b5cc785fc3d58dc0d0521d6c40b0166cc37daa2aabd72506df138d468ca967a Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.651812 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6dsxb"] Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.653473 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.658995 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:56:29 crc kubenswrapper[4884]: W1128 16:56:29.660152 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ad0cb90_122b_49a9_a4bb_7fc3488ad6e0.slice/crio-5dc2928ec3ad88a1bf9392f55d6f993239a1cfe8d944997781f6e5fc48e1fb60 WatchSource:0}: Error finding container 5dc2928ec3ad88a1bf9392f55d6f993239a1cfe8d944997781f6e5fc48e1fb60: Status 404 returned error can't find the container with id 5dc2928ec3ad88a1bf9392f55d6f993239a1cfe8d944997781f6e5fc48e1fb60 Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.664547 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.666901 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.680197 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6dsxb"] Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.744964 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kqdzv" event={"ID":"06cbddb0-9fd4-44dc-b931-ed32606c010d","Type":"ContainerStarted","Data":"2d235d53f918b8d6c6fe74a028da831135b36615e07fabfdaa69bd7e236add57"} Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.746538 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45b5578b-5680-45af-b7e4-30860a0fa321","Type":"ContainerStarted","Data":"6b5cc785fc3d58dc0d0521d6c40b0166cc37daa2aabd72506df138d468ca967a"} Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.748397 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0","Type":"ContainerStarted","Data":"5dc2928ec3ad88a1bf9392f55d6f993239a1cfe8d944997781f6e5fc48e1fb60"} Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.795583 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vwx9\" (UniqueName: \"kubernetes.io/projected/46544cf8-e2f2-4788-9d06-27a91f38e9de-kube-api-access-7vwx9\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.795632 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-scripts\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.795667 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-config-data\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.795732 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.897358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vwx9\" (UniqueName: \"kubernetes.io/projected/46544cf8-e2f2-4788-9d06-27a91f38e9de-kube-api-access-7vwx9\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.897396 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-scripts\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.897451 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-config-data\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.897488 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.904834 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " 
pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.904985 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-config-data\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.906658 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-scripts\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.915994 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vwx9\" (UniqueName: \"kubernetes.io/projected/46544cf8-e2f2-4788-9d06-27a91f38e9de-kube-api-access-7vwx9\") pod \"nova-cell1-conductor-db-sync-6dsxb\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.937682 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:29 crc kubenswrapper[4884]: I1128 16:56:29.967456 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.062392 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b56bc7bfc-v7njx"] Nov 28 16:56:30 crc kubenswrapper[4884]: W1128 16:56:30.065869 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa810bba_2b91_426f_8ab9_6ce129794e16.slice/crio-7b83c0f243832858fb47a4170d7a38ce3b33c78a6ec75c01776b6cefbbaa6a83 WatchSource:0}: Error finding container 7b83c0f243832858fb47a4170d7a38ce3b33c78a6ec75c01776b6cefbbaa6a83: Status 404 returned error can't find the container with id 7b83c0f243832858fb47a4170d7a38ce3b33c78a6ec75c01776b6cefbbaa6a83 Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.162413 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.324239 4884 scope.go:117] "RemoveContainer" containerID="22ca90449ac7e3e109d2317d7964d9e10af47081ba9818148188551185b92805" Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.633949 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6dsxb"] Nov 28 16:56:30 crc kubenswrapper[4884]: W1128 16:56:30.644183 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46544cf8_e2f2_4788_9d06_27a91f38e9de.slice/crio-fd37a3184689caab692b0e5e8dbd8fda6c474befc07a3eba2ec9a96232c9e253 WatchSource:0}: Error finding container fd37a3184689caab692b0e5e8dbd8fda6c474befc07a3eba2ec9a96232c9e253: Status 404 returned error can't find the container with id fd37a3184689caab692b0e5e8dbd8fda6c474befc07a3eba2ec9a96232c9e253 Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.784240 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45b5578b-5680-45af-b7e4-30860a0fa321","Type":"ContainerStarted","Data":"689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.787330 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" event={"ID":"46544cf8-e2f2-4788-9d06-27a91f38e9de","Type":"ContainerStarted","Data":"fd37a3184689caab692b0e5e8dbd8fda6c474befc07a3eba2ec9a96232c9e253"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.788718 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0","Type":"ContainerStarted","Data":"129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.803111 4884 generic.go:334] "Generic (PLEG): container finished" podID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerID="15199d40da9ba76d2b38a76cf92f4317e1da12546d0918483d3e0a779fc8c9c8" exitCode=0 Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.803217 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" event={"ID":"aa810bba-2b91-426f-8ab9-6ce129794e16","Type":"ContainerDied","Data":"15199d40da9ba76d2b38a76cf92f4317e1da12546d0918483d3e0a779fc8c9c8"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.803259 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" event={"ID":"aa810bba-2b91-426f-8ab9-6ce129794e16","Type":"ContainerStarted","Data":"7b83c0f243832858fb47a4170d7a38ce3b33c78a6ec75c01776b6cefbbaa6a83"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.826138 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d9768451-9dcc-446f-9811-11d5a849ddfb","Type":"ContainerStarted","Data":"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.826421 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d9768451-9dcc-446f-9811-11d5a849ddfb","Type":"ContainerStarted","Data":"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.826437 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"d9768451-9dcc-446f-9811-11d5a849ddfb","Type":"ContainerStarted","Data":"de30a549ce8297efb968a2edc5dddf20dd5301e5577303402a015b3c765588dd"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.834022 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"423a0680-d4b1-476a-9c01-a3d86f40948d","Type":"ContainerStarted","Data":"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.834108 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"423a0680-d4b1-476a-9c01-a3d86f40948d","Type":"ContainerStarted","Data":"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.834125 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"423a0680-d4b1-476a-9c01-a3d86f40948d","Type":"ContainerStarted","Data":"feb8efbadbc07a8d9c499d30721255134d3af955db2cff4bcb72ca13a45ab008"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.843658 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kqdzv" event={"ID":"06cbddb0-9fd4-44dc-b931-ed32606c010d","Type":"ContainerStarted","Data":"850d601e9633378d40912316fea9ac54177f93a36774f682baae8ff8d1d184ab"} Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.880715 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.880690785 podStartE2EDuration="2.880690785s" podCreationTimestamp="2025-11-28 16:56:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:30.874006561 +0000 UTC m=+5830.436790362" watchObservedRunningTime="2025-11-28 16:56:30.880690785 +0000 UTC m=+5830.443474586" Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.912137 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.912079833 podStartE2EDuration="2.912079833s" podCreationTimestamp="2025-11-28 16:56:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:30.893036857 +0000 UTC m=+5830.455820658" watchObservedRunningTime="2025-11-28 16:56:30.912079833 +0000 UTC m=+5830.474863634" Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.947820 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.9478013770000002 podStartE2EDuration="2.947801377s" podCreationTimestamp="2025-11-28 16:56:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:30.937942976 +0000 UTC m=+5830.500726797" watchObservedRunningTime="2025-11-28 16:56:30.947801377 +0000 UTC m=+5830.510585178" Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.969987 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.96995992 podStartE2EDuration="2.96995992s" podCreationTimestamp="2025-11-28 16:56:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:30.965613582 +0000 UTC m=+5830.528397393" watchObservedRunningTime="2025-11-28 
16:56:30.96995992 +0000 UTC m=+5830.532743721" Nov 28 16:56:30 crc kubenswrapper[4884]: I1128 16:56:30.990618 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-kqdzv" podStartSLOduration=2.990597384 podStartE2EDuration="2.990597384s" podCreationTimestamp="2025-11-28 16:56:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:30.983018289 +0000 UTC m=+5830.545802100" watchObservedRunningTime="2025-11-28 16:56:30.990597384 +0000 UTC m=+5830.553381185" Nov 28 16:56:31 crc kubenswrapper[4884]: I1128 16:56:31.689525 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:56:31 crc kubenswrapper[4884]: E1128 16:56:31.689881 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:56:31 crc kubenswrapper[4884]: I1128 16:56:31.858566 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" event={"ID":"46544cf8-e2f2-4788-9d06-27a91f38e9de","Type":"ContainerStarted","Data":"109397a594cf0a69b6dd9009cb79b412b045ad2bfc86168000b49673ca7daa90"} Nov 28 16:56:31 crc kubenswrapper[4884]: I1128 16:56:31.865158 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" event={"ID":"aa810bba-2b91-426f-8ab9-6ce129794e16","Type":"ContainerStarted","Data":"9b9496870baeb91518dbeccea8a731f1a5a576163cb7b1f53a4e87f06bf713d3"} Nov 28 16:56:31 crc kubenswrapper[4884]: I1128 16:56:31.865203 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:31 crc kubenswrapper[4884]: I1128 16:56:31.894567 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" podStartSLOduration=2.894547695 podStartE2EDuration="2.894547695s" podCreationTimestamp="2025-11-28 16:56:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:31.882970352 +0000 UTC m=+5831.445754153" watchObservedRunningTime="2025-11-28 16:56:31.894547695 +0000 UTC m=+5831.457331496" Nov 28 16:56:31 crc kubenswrapper[4884]: I1128 16:56:31.905960 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" podStartSLOduration=3.905937614 podStartE2EDuration="3.905937614s" podCreationTimestamp="2025-11-28 16:56:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:31.900847509 +0000 UTC m=+5831.463631310" watchObservedRunningTime="2025-11-28 16:56:31.905937614 +0000 UTC m=+5831.468721425" Nov 28 16:56:33 crc kubenswrapper[4884]: I1128 16:56:33.943253 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.178859 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.204872 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.204987 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.903170 4884 generic.go:334] "Generic (PLEG): container finished" podID="06cbddb0-9fd4-44dc-b931-ed32606c010d" containerID="850d601e9633378d40912316fea9ac54177f93a36774f682baae8ff8d1d184ab" exitCode=0 Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.903243 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kqdzv" event={"ID":"06cbddb0-9fd4-44dc-b931-ed32606c010d","Type":"ContainerDied","Data":"850d601e9633378d40912316fea9ac54177f93a36774f682baae8ff8d1d184ab"} Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.904911 4884 generic.go:334] "Generic (PLEG): container finished" podID="46544cf8-e2f2-4788-9d06-27a91f38e9de" containerID="109397a594cf0a69b6dd9009cb79b412b045ad2bfc86168000b49673ca7daa90" exitCode=0 Nov 28 16:56:34 crc kubenswrapper[4884]: I1128 16:56:34.905106 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" event={"ID":"46544cf8-e2f2-4788-9d06-27a91f38e9de","Type":"ContainerDied","Data":"109397a594cf0a69b6dd9009cb79b412b045ad2bfc86168000b49673ca7daa90"} Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.365767 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.371686 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439615 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-config-data\") pod \"46544cf8-e2f2-4788-9d06-27a91f38e9de\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439658 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-config-data\") pod \"06cbddb0-9fd4-44dc-b931-ed32606c010d\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439711 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-combined-ca-bundle\") pod \"46544cf8-e2f2-4788-9d06-27a91f38e9de\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439748 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-combined-ca-bundle\") pod \"06cbddb0-9fd4-44dc-b931-ed32606c010d\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439770 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-scripts\") pod \"06cbddb0-9fd4-44dc-b931-ed32606c010d\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439807 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vwx9\" (UniqueName: \"kubernetes.io/projected/46544cf8-e2f2-4788-9d06-27a91f38e9de-kube-api-access-7vwx9\") pod \"46544cf8-e2f2-4788-9d06-27a91f38e9de\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439836 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvcp7\" (UniqueName: \"kubernetes.io/projected/06cbddb0-9fd4-44dc-b931-ed32606c010d-kube-api-access-mvcp7\") pod \"06cbddb0-9fd4-44dc-b931-ed32606c010d\" (UID: \"06cbddb0-9fd4-44dc-b931-ed32606c010d\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.439864 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-scripts\") pod \"46544cf8-e2f2-4788-9d06-27a91f38e9de\" (UID: \"46544cf8-e2f2-4788-9d06-27a91f38e9de\") " Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.446109 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-scripts" (OuterVolumeSpecName: "scripts") pod "06cbddb0-9fd4-44dc-b931-ed32606c010d" (UID: "06cbddb0-9fd4-44dc-b931-ed32606c010d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.446339 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46544cf8-e2f2-4788-9d06-27a91f38e9de-kube-api-access-7vwx9" (OuterVolumeSpecName: "kube-api-access-7vwx9") pod "46544cf8-e2f2-4788-9d06-27a91f38e9de" (UID: "46544cf8-e2f2-4788-9d06-27a91f38e9de"). InnerVolumeSpecName "kube-api-access-7vwx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.447938 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06cbddb0-9fd4-44dc-b931-ed32606c010d-kube-api-access-mvcp7" (OuterVolumeSpecName: "kube-api-access-mvcp7") pod "06cbddb0-9fd4-44dc-b931-ed32606c010d" (UID: "06cbddb0-9fd4-44dc-b931-ed32606c010d"). InnerVolumeSpecName "kube-api-access-mvcp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.448423 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-scripts" (OuterVolumeSpecName: "scripts") pod "46544cf8-e2f2-4788-9d06-27a91f38e9de" (UID: "46544cf8-e2f2-4788-9d06-27a91f38e9de"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.466336 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06cbddb0-9fd4-44dc-b931-ed32606c010d" (UID: "06cbddb0-9fd4-44dc-b931-ed32606c010d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.471369 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-config-data" (OuterVolumeSpecName: "config-data") pod "06cbddb0-9fd4-44dc-b931-ed32606c010d" (UID: "06cbddb0-9fd4-44dc-b931-ed32606c010d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.473600 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "46544cf8-e2f2-4788-9d06-27a91f38e9de" (UID: "46544cf8-e2f2-4788-9d06-27a91f38e9de"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.485970 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-config-data" (OuterVolumeSpecName: "config-data") pod "46544cf8-e2f2-4788-9d06-27a91f38e9de" (UID: "46544cf8-e2f2-4788-9d06-27a91f38e9de"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541278 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541328 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541345 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541360 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46544cf8-e2f2-4788-9d06-27a91f38e9de-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541383 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541400 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/06cbddb0-9fd4-44dc-b931-ed32606c010d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541416 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vwx9\" (UniqueName: \"kubernetes.io/projected/46544cf8-e2f2-4788-9d06-27a91f38e9de-kube-api-access-7vwx9\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.541432 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvcp7\" (UniqueName: \"kubernetes.io/projected/06cbddb0-9fd4-44dc-b931-ed32606c010d-kube-api-access-mvcp7\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.925254 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" event={"ID":"46544cf8-e2f2-4788-9d06-27a91f38e9de","Type":"ContainerDied","Data":"fd37a3184689caab692b0e5e8dbd8fda6c474befc07a3eba2ec9a96232c9e253"} Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.925383 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd37a3184689caab692b0e5e8dbd8fda6c474befc07a3eba2ec9a96232c9e253" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.925498 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-6dsxb" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.927476 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kqdzv" Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.928306 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kqdzv" event={"ID":"06cbddb0-9fd4-44dc-b931-ed32606c010d","Type":"ContainerDied","Data":"2d235d53f918b8d6c6fe74a028da831135b36615e07fabfdaa69bd7e236add57"} Nov 28 16:56:36 crc kubenswrapper[4884]: I1128 16:56:36.928344 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d235d53f918b8d6c6fe74a028da831135b36615e07fabfdaa69bd7e236add57" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.013584 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:56:37 crc kubenswrapper[4884]: E1128 16:56:37.014083 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46544cf8-e2f2-4788-9d06-27a91f38e9de" containerName="nova-cell1-conductor-db-sync" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.014145 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="46544cf8-e2f2-4788-9d06-27a91f38e9de" containerName="nova-cell1-conductor-db-sync" Nov 28 16:56:37 crc kubenswrapper[4884]: E1128 16:56:37.014179 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06cbddb0-9fd4-44dc-b931-ed32606c010d" containerName="nova-manage" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.014188 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="06cbddb0-9fd4-44dc-b931-ed32606c010d" containerName="nova-manage" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.014401 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="46544cf8-e2f2-4788-9d06-27a91f38e9de" containerName="nova-cell1-conductor-db-sync" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.014426 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="06cbddb0-9fd4-44dc-b931-ed32606c010d" containerName="nova-manage" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.015214 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.017962 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.053236 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.099611 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.100064 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-api" containerID="cri-o://95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02" gracePeriod=30 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.099871 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-log" containerID="cri-o://349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359" gracePeriod=30 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.153490 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.153941 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.154116 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4m5l\" (UniqueName: \"kubernetes.io/projected/3946aff4-5d46-4047-b17f-b92772d9477d-kube-api-access-d4m5l\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.172602 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.172810 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="45b5578b-5680-45af-b7e4-30860a0fa321" containerName="nova-scheduler-scheduler" containerID="cri-o://689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393" gracePeriod=30 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.200816 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.201124 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-log" containerID="cri-o://051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a" gracePeriod=30 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.201482 4884 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/nova-metadata-0" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-metadata" containerID="cri-o://f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a" gracePeriod=30 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.257124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.257238 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4m5l\" (UniqueName: \"kubernetes.io/projected/3946aff4-5d46-4047-b17f-b92772d9477d-kube-api-access-d4m5l\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.257264 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.267857 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.268558 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.276974 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4m5l\" (UniqueName: \"kubernetes.io/projected/3946aff4-5d46-4047-b17f-b92772d9477d-kube-api-access-d4m5l\") pod \"nova-cell1-conductor-0\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.344269 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.537678 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:37 crc kubenswrapper[4884]: E1128 16:56:37.643313 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9768451_9dcc_446f_9811_11d5a849ddfb.slice/crio-f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9768451_9dcc_446f_9811_11d5a849ddfb.slice/crio-conmon-051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9768451_9dcc_446f_9811_11d5a849ddfb.slice/crio-051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9768451_9dcc_446f_9811_11d5a849ddfb.slice/crio-conmon-f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a.scope\": RecentStats: unable to find data in memory cache]" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.665950 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-combined-ca-bundle\") pod \"423a0680-d4b1-476a-9c01-a3d86f40948d\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.666125 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slmbq\" (UniqueName: \"kubernetes.io/projected/423a0680-d4b1-476a-9c01-a3d86f40948d-kube-api-access-slmbq\") pod \"423a0680-d4b1-476a-9c01-a3d86f40948d\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.666350 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423a0680-d4b1-476a-9c01-a3d86f40948d-logs\") pod \"423a0680-d4b1-476a-9c01-a3d86f40948d\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.666400 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-config-data\") pod \"423a0680-d4b1-476a-9c01-a3d86f40948d\" (UID: \"423a0680-d4b1-476a-9c01-a3d86f40948d\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.666957 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/423a0680-d4b1-476a-9c01-a3d86f40948d-logs" (OuterVolumeSpecName: "logs") pod "423a0680-d4b1-476a-9c01-a3d86f40948d" (UID: "423a0680-d4b1-476a-9c01-a3d86f40948d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.672896 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/423a0680-d4b1-476a-9c01-a3d86f40948d-kube-api-access-slmbq" (OuterVolumeSpecName: "kube-api-access-slmbq") pod "423a0680-d4b1-476a-9c01-a3d86f40948d" (UID: "423a0680-d4b1-476a-9c01-a3d86f40948d"). InnerVolumeSpecName "kube-api-access-slmbq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.699013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-config-data" (OuterVolumeSpecName: "config-data") pod "423a0680-d4b1-476a-9c01-a3d86f40948d" (UID: "423a0680-d4b1-476a-9c01-a3d86f40948d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.744371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "423a0680-d4b1-476a-9c01-a3d86f40948d" (UID: "423a0680-d4b1-476a-9c01-a3d86f40948d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.762813 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.769961 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/423a0680-d4b1-476a-9c01-a3d86f40948d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.769992 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.770027 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/423a0680-d4b1-476a-9c01-a3d86f40948d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.770042 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slmbq\" (UniqueName: \"kubernetes.io/projected/423a0680-d4b1-476a-9c01-a3d86f40948d-kube-api-access-slmbq\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.870876 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-config-data\") pod \"d9768451-9dcc-446f-9811-11d5a849ddfb\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.871052 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsh7q\" (UniqueName: \"kubernetes.io/projected/d9768451-9dcc-446f-9811-11d5a849ddfb-kube-api-access-qsh7q\") pod \"d9768451-9dcc-446f-9811-11d5a849ddfb\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.871128 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-combined-ca-bundle\") pod \"d9768451-9dcc-446f-9811-11d5a849ddfb\" (UID: \"d9768451-9dcc-446f-9811-11d5a849ddfb\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.871148 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9768451-9dcc-446f-9811-11d5a849ddfb-logs\") pod \"d9768451-9dcc-446f-9811-11d5a849ddfb\" (UID: 
\"d9768451-9dcc-446f-9811-11d5a849ddfb\") " Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.871625 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9768451-9dcc-446f-9811-11d5a849ddfb-logs" (OuterVolumeSpecName: "logs") pod "d9768451-9dcc-446f-9811-11d5a849ddfb" (UID: "d9768451-9dcc-446f-9811-11d5a849ddfb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.874540 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9768451-9dcc-446f-9811-11d5a849ddfb-kube-api-access-qsh7q" (OuterVolumeSpecName: "kube-api-access-qsh7q") pod "d9768451-9dcc-446f-9811-11d5a849ddfb" (UID: "d9768451-9dcc-446f-9811-11d5a849ddfb"). InnerVolumeSpecName "kube-api-access-qsh7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.896360 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-config-data" (OuterVolumeSpecName: "config-data") pod "d9768451-9dcc-446f-9811-11d5a849ddfb" (UID: "d9768451-9dcc-446f-9811-11d5a849ddfb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: W1128 16:56:37.897947 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3946aff4_5d46_4047_b17f_b92772d9477d.slice/crio-67cb62dab563fed866b4e8226685aee5df76b152a66046b3077bd0cdb61aceaf WatchSource:0}: Error finding container 67cb62dab563fed866b4e8226685aee5df76b152a66046b3077bd0cdb61aceaf: Status 404 returned error can't find the container with id 67cb62dab563fed866b4e8226685aee5df76b152a66046b3077bd0cdb61aceaf Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.898203 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.899285 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9768451-9dcc-446f-9811-11d5a849ddfb" (UID: "d9768451-9dcc-446f-9811-11d5a849ddfb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.937333 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3946aff4-5d46-4047-b17f-b92772d9477d","Type":"ContainerStarted","Data":"67cb62dab563fed866b4e8226685aee5df76b152a66046b3077bd0cdb61aceaf"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939469 4884 generic.go:334] "Generic (PLEG): container finished" podID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerID="f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a" exitCode=0 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939497 4884 generic.go:334] "Generic (PLEG): container finished" podID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerID="051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a" exitCode=143 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939543 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939542 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d9768451-9dcc-446f-9811-11d5a849ddfb","Type":"ContainerDied","Data":"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939645 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d9768451-9dcc-446f-9811-11d5a849ddfb","Type":"ContainerDied","Data":"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939659 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d9768451-9dcc-446f-9811-11d5a849ddfb","Type":"ContainerDied","Data":"de30a549ce8297efb968a2edc5dddf20dd5301e5577303402a015b3c765588dd"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.939675 4884 scope.go:117] "RemoveContainer" containerID="f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.949416 4884 generic.go:334] "Generic (PLEG): container finished" podID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerID="95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02" exitCode=0 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.949456 4884 generic.go:334] "Generic (PLEG): container finished" podID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerID="349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359" exitCode=143 Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.949467 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.949474 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"423a0680-d4b1-476a-9c01-a3d86f40948d","Type":"ContainerDied","Data":"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.949627 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"423a0680-d4b1-476a-9c01-a3d86f40948d","Type":"ContainerDied","Data":"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.949650 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"423a0680-d4b1-476a-9c01-a3d86f40948d","Type":"ContainerDied","Data":"feb8efbadbc07a8d9c499d30721255134d3af955db2cff4bcb72ca13a45ab008"} Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.968385 4884 scope.go:117] "RemoveContainer" containerID="051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.972517 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.972549 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsh7q\" (UniqueName: \"kubernetes.io/projected/d9768451-9dcc-446f-9811-11d5a849ddfb-kube-api-access-qsh7q\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.972562 4884 reconciler_common.go:293] "Volume detached for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9768451-9dcc-446f-9811-11d5a849ddfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:37 crc kubenswrapper[4884]: I1128 16:56:37.972573 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9768451-9dcc-446f-9811-11d5a849ddfb-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.009512 4884 scope.go:117] "RemoveContainer" containerID="f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.010525 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.016215 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a\": container with ID starting with f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a not found: ID does not exist" containerID="f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.016277 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a"} err="failed to get container status \"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a\": rpc error: code = NotFound desc = could not find container \"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a\": container with ID starting with f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.016303 4884 scope.go:117] "RemoveContainer" containerID="051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a" Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.019622 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a\": container with ID starting with 051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a not found: ID does not exist" containerID="051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.019849 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a"} err="failed to get container status \"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a\": rpc error: code = NotFound desc = could not find container \"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a\": container with ID starting with 051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.019963 4884 scope.go:117] "RemoveContainer" containerID="f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.022302 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a"} err="failed to get container status \"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a\": rpc error: code = 
NotFound desc = could not find container \"f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a\": container with ID starting with f3974df4eb1aa1f20655cc1e585d28f67957a75160fa983098f73ae3670ce99a not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.022349 4884 scope.go:117] "RemoveContainer" containerID="051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.022752 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.025671 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a"} err="failed to get container status \"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a\": rpc error: code = NotFound desc = could not find container \"051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a\": container with ID starting with 051da4e5f97c56632f515ebb4b67afae2499fb00b34ea1653ec3dec2b3144f9a not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.025708 4884 scope.go:117] "RemoveContainer" containerID="95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.046140 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.059449 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.074292 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.074780 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-api" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.074802 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-api" Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.074817 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-metadata" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.074827 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-metadata" Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.074852 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-log" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.074861 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-log" Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.074886 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-log" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.074896 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-log" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.075186 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" 
containerName="nova-metadata-metadata" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.075213 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-log" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.075233 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" containerName="nova-metadata-log" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.075250 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" containerName="nova-api-api" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.082194 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.085020 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.102208 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.110710 4884 scope.go:117] "RemoveContainer" containerID="349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.120830 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.123244 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.129163 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.135566 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.137877 4884 scope.go:117] "RemoveContainer" containerID="95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02" Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.138523 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02\": container with ID starting with 95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02 not found: ID does not exist" containerID="95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.138561 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02"} err="failed to get container status \"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02\": rpc error: code = NotFound desc = could not find container \"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02\": container with ID starting with 95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02 not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.138612 4884 scope.go:117] "RemoveContainer" containerID="349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359" Nov 28 16:56:38 crc kubenswrapper[4884]: E1128 16:56:38.139349 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359\": container with ID starting with 349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359 not found: ID does not exist" containerID="349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.139391 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359"} err="failed to get container status \"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359\": rpc error: code = NotFound desc = could not find container \"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359\": container with ID starting with 349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359 not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.139441 4884 scope.go:117] "RemoveContainer" containerID="95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.140959 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02"} err="failed to get container status \"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02\": rpc error: code = NotFound desc = could not find container \"95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02\": container with ID starting with 95ab4c5e0cf36f9b5c78d8394bba92a081274322c3186ec0c6396a84347bee02 not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.140994 4884 scope.go:117] "RemoveContainer" containerID="349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.141323 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359"} err="failed to get container status \"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359\": rpc error: code = NotFound desc = could not find container \"349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359\": container with ID starting with 349663d83f84a486d14d2ecae6a08beac9a24ac68767e2c5604dd00326797359 not found: ID does not exist" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.277761 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld9s8\" (UniqueName: \"kubernetes.io/projected/8d223366-f9e0-4773-b7fc-55a246f05d88-kube-api-access-ld9s8\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.277834 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d223366-f9e0-4773-b7fc-55a246f05d88-logs\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.278681 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc 
kubenswrapper[4884]: I1128 16:56:38.279034 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cb0238a-09e2-46d3-b052-0520f4c12307-logs\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.279076 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgfrm\" (UniqueName: \"kubernetes.io/projected/6cb0238a-09e2-46d3-b052-0520f4c12307-kube-api-access-xgfrm\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.279162 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-config-data\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.279510 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.279581 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-config-data\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.380947 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381135 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cb0238a-09e2-46d3-b052-0520f4c12307-logs\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381197 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgfrm\" (UniqueName: \"kubernetes.io/projected/6cb0238a-09e2-46d3-b052-0520f4c12307-kube-api-access-xgfrm\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381239 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-config-data\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-combined-ca-bundle\") pod 
\"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381338 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-config-data\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381378 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld9s8\" (UniqueName: \"kubernetes.io/projected/8d223366-f9e0-4773-b7fc-55a246f05d88-kube-api-access-ld9s8\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d223366-f9e0-4773-b7fc-55a246f05d88-logs\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381743 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cb0238a-09e2-46d3-b052-0520f4c12307-logs\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.381861 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d223366-f9e0-4773-b7fc-55a246f05d88-logs\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.386248 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.386742 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-config-data\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.387667 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.387737 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-config-data\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.401162 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgfrm\" (UniqueName: \"kubernetes.io/projected/6cb0238a-09e2-46d3-b052-0520f4c12307-kube-api-access-xgfrm\") pod \"nova-metadata-0\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " 
pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.401565 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld9s8\" (UniqueName: \"kubernetes.io/projected/8d223366-f9e0-4773-b7fc-55a246f05d88-kube-api-access-ld9s8\") pod \"nova-api-0\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.409730 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.446860 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.703121 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="423a0680-d4b1-476a-9c01-a3d86f40948d" path="/var/lib/kubelet/pods/423a0680-d4b1-476a-9c01-a3d86f40948d/volumes" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.704394 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9768451-9dcc-446f-9811-11d5a849ddfb" path="/var/lib/kubelet/pods/d9768451-9dcc-446f-9811-11d5a849ddfb/volumes" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.876045 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: W1128 16:56:38.947571 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cb0238a_09e2_46d3_b052_0520f4c12307.slice/crio-e38df9655a6b48ac89ab050c34dfa06f4e96ecf91d1d7b766b519857f147eb6e WatchSource:0}: Error finding container e38df9655a6b48ac89ab050c34dfa06f4e96ecf91d1d7b766b519857f147eb6e: Status 404 returned error can't find the container with id e38df9655a6b48ac89ab050c34dfa06f4e96ecf91d1d7b766b519857f147eb6e Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.956045 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.982376 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8d223366-f9e0-4773-b7fc-55a246f05d88","Type":"ContainerStarted","Data":"99d62a76cbbff2ef4f776ee3be592e3e7df70731c977c079aef92d22838f4059"} Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.989793 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3946aff4-5d46-4047-b17f-b92772d9477d","Type":"ContainerStarted","Data":"ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e"} Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.990390 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:38 crc kubenswrapper[4884]: I1128 16:56:38.992310 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6cb0238a-09e2-46d3-b052-0520f4c12307","Type":"ContainerStarted","Data":"e38df9655a6b48ac89ab050c34dfa06f4e96ecf91d1d7b766b519857f147eb6e"} Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.011631 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.011613108 podStartE2EDuration="3.011613108s" podCreationTimestamp="2025-11-28 16:56:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 16:56:39.005367235 +0000 UTC m=+5838.568151036" watchObservedRunningTime="2025-11-28 16:56:39.011613108 +0000 UTC m=+5838.574396909" Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.180121 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.194759 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.280255 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.352516 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55778596c9-v9n6n"] Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.352811 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" podUID="810a04af-eb91-4371-8b43-a1b733bef247" containerName="dnsmasq-dns" containerID="cri-o://f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f" gracePeriod=10 Nov 28 16:56:39 crc kubenswrapper[4884]: I1128 16:56:39.896595 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.005987 4884 generic.go:334] "Generic (PLEG): container finished" podID="810a04af-eb91-4371-8b43-a1b733bef247" containerID="f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f" exitCode=0 Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.006112 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.006135 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" event={"ID":"810a04af-eb91-4371-8b43-a1b733bef247","Type":"ContainerDied","Data":"f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f"} Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.006223 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55778596c9-v9n6n" event={"ID":"810a04af-eb91-4371-8b43-a1b733bef247","Type":"ContainerDied","Data":"37fa3d27756dd3fb44af29246a7d15577513def9438cd8b6d69ca991e8ffe65b"} Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.006247 4884 scope.go:117] "RemoveContainer" containerID="f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.009663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8d223366-f9e0-4773-b7fc-55a246f05d88","Type":"ContainerStarted","Data":"68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96"} Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.009709 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8d223366-f9e0-4773-b7fc-55a246f05d88","Type":"ContainerStarted","Data":"3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0"} Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015429 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpm8z\" (UniqueName: \"kubernetes.io/projected/810a04af-eb91-4371-8b43-a1b733bef247-kube-api-access-kpm8z\") pod \"810a04af-eb91-4371-8b43-a1b733bef247\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015476 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6cb0238a-09e2-46d3-b052-0520f4c12307","Type":"ContainerStarted","Data":"622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b"} Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015542 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6cb0238a-09e2-46d3-b052-0520f4c12307","Type":"ContainerStarted","Data":"52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277"} Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015584 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-dns-svc\") pod \"810a04af-eb91-4371-8b43-a1b733bef247\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015628 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-config\") pod \"810a04af-eb91-4371-8b43-a1b733bef247\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015829 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-sb\") pod \"810a04af-eb91-4371-8b43-a1b733bef247\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.015851 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-nb\") pod \"810a04af-eb91-4371-8b43-a1b733bef247\" (UID: \"810a04af-eb91-4371-8b43-a1b733bef247\") " Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.020360 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/810a04af-eb91-4371-8b43-a1b733bef247-kube-api-access-kpm8z" (OuterVolumeSpecName: "kube-api-access-kpm8z") pod "810a04af-eb91-4371-8b43-a1b733bef247" (UID: "810a04af-eb91-4371-8b43-a1b733bef247"). InnerVolumeSpecName "kube-api-access-kpm8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.032568 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.037597 4884 scope.go:117] "RemoveContainer" containerID="cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.039372 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.039352218 podStartE2EDuration="2.039352218s" podCreationTimestamp="2025-11-28 16:56:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:40.032894299 +0000 UTC m=+5839.595678120" watchObservedRunningTime="2025-11-28 16:56:40.039352218 +0000 UTC m=+5839.602136019" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.087375 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.087358442 podStartE2EDuration="2.087358442s" podCreationTimestamp="2025-11-28 16:56:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:40.085549128 +0000 UTC m=+5839.648332929" watchObservedRunningTime="2025-11-28 16:56:40.087358442 +0000 UTC m=+5839.650142243" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.098460 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "810a04af-eb91-4371-8b43-a1b733bef247" (UID: "810a04af-eb91-4371-8b43-a1b733bef247"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.102276 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "810a04af-eb91-4371-8b43-a1b733bef247" (UID: "810a04af-eb91-4371-8b43-a1b733bef247"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.109986 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-config" (OuterVolumeSpecName: "config") pod "810a04af-eb91-4371-8b43-a1b733bef247" (UID: "810a04af-eb91-4371-8b43-a1b733bef247"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.114291 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "810a04af-eb91-4371-8b43-a1b733bef247" (UID: "810a04af-eb91-4371-8b43-a1b733bef247"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.119318 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.119348 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.119357 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpm8z\" (UniqueName: \"kubernetes.io/projected/810a04af-eb91-4371-8b43-a1b733bef247-kube-api-access-kpm8z\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.119368 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.119376 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/810a04af-eb91-4371-8b43-a1b733bef247-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.121347 4884 scope.go:117] "RemoveContainer" containerID="f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f" Nov 28 16:56:40 crc kubenswrapper[4884]: E1128 16:56:40.122822 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f\": container with ID starting with f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f not found: ID does not exist" containerID="f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.122863 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f"} err="failed to get container status \"f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f\": rpc error: code = NotFound desc = could not find container \"f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f\": container with ID starting with f87c766b7c9f99e81ec9b903492b86412cabd0969b011f28ddbfdf6ce356387f not found: ID does not exist" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.122887 4884 scope.go:117] "RemoveContainer" containerID="cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579" Nov 28 16:56:40 crc kubenswrapper[4884]: E1128 16:56:40.123258 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579\": container with ID starting with 
cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579 not found: ID does not exist" containerID="cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.123357 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579"} err="failed to get container status \"cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579\": rpc error: code = NotFound desc = could not find container \"cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579\": container with ID starting with cc5169694070dfac6c3bffaea2e0739cf8016cacc9e7a2c05d296a2067950579 not found: ID does not exist" Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.349657 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55778596c9-v9n6n"] Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.359243 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55778596c9-v9n6n"] Nov 28 16:56:40 crc kubenswrapper[4884]: I1128 16:56:40.702619 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="810a04af-eb91-4371-8b43-a1b733bef247" path="/var/lib/kubelet/pods/810a04af-eb91-4371-8b43-a1b733bef247/volumes" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.603161 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.759585 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-combined-ca-bundle\") pod \"45b5578b-5680-45af-b7e4-30860a0fa321\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.759674 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-config-data\") pod \"45b5578b-5680-45af-b7e4-30860a0fa321\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.759752 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bg8r\" (UniqueName: \"kubernetes.io/projected/45b5578b-5680-45af-b7e4-30860a0fa321-kube-api-access-6bg8r\") pod \"45b5578b-5680-45af-b7e4-30860a0fa321\" (UID: \"45b5578b-5680-45af-b7e4-30860a0fa321\") " Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.771739 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45b5578b-5680-45af-b7e4-30860a0fa321-kube-api-access-6bg8r" (OuterVolumeSpecName: "kube-api-access-6bg8r") pod "45b5578b-5680-45af-b7e4-30860a0fa321" (UID: "45b5578b-5680-45af-b7e4-30860a0fa321"). InnerVolumeSpecName "kube-api-access-6bg8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.788312 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45b5578b-5680-45af-b7e4-30860a0fa321" (UID: "45b5578b-5680-45af-b7e4-30860a0fa321"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.791573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-config-data" (OuterVolumeSpecName: "config-data") pod "45b5578b-5680-45af-b7e4-30860a0fa321" (UID: "45b5578b-5680-45af-b7e4-30860a0fa321"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.862351 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.862385 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bg8r\" (UniqueName: \"kubernetes.io/projected/45b5578b-5680-45af-b7e4-30860a0fa321-kube-api-access-6bg8r\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:41 crc kubenswrapper[4884]: I1128 16:56:41.862396 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45b5578b-5680-45af-b7e4-30860a0fa321-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.034176 4884 generic.go:334] "Generic (PLEG): container finished" podID="45b5578b-5680-45af-b7e4-30860a0fa321" containerID="689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393" exitCode=0 Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.034240 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.034235 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45b5578b-5680-45af-b7e4-30860a0fa321","Type":"ContainerDied","Data":"689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393"} Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.034622 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45b5578b-5680-45af-b7e4-30860a0fa321","Type":"ContainerDied","Data":"6b5cc785fc3d58dc0d0521d6c40b0166cc37daa2aabd72506df138d468ca967a"} Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.034648 4884 scope.go:117] "RemoveContainer" containerID="689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.060626 4884 scope.go:117] "RemoveContainer" containerID="689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393" Nov 28 16:56:42 crc kubenswrapper[4884]: E1128 16:56:42.062014 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393\": container with ID starting with 689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393 not found: ID does not exist" containerID="689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.062079 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393"} err="failed to get container status \"689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393\": rpc error: code = NotFound desc = could not find container 
\"689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393\": container with ID starting with 689f1b6f3891406d77e14b74c29ef9ff52778bff3478d81553a825390d7bb393 not found: ID does not exist" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.098001 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.106314 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.120834 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:42 crc kubenswrapper[4884]: E1128 16:56:42.121516 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="810a04af-eb91-4371-8b43-a1b733bef247" containerName="dnsmasq-dns" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.121551 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="810a04af-eb91-4371-8b43-a1b733bef247" containerName="dnsmasq-dns" Nov 28 16:56:42 crc kubenswrapper[4884]: E1128 16:56:42.121607 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="810a04af-eb91-4371-8b43-a1b733bef247" containerName="init" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.121623 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="810a04af-eb91-4371-8b43-a1b733bef247" containerName="init" Nov 28 16:56:42 crc kubenswrapper[4884]: E1128 16:56:42.121644 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45b5578b-5680-45af-b7e4-30860a0fa321" containerName="nova-scheduler-scheduler" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.121656 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="45b5578b-5680-45af-b7e4-30860a0fa321" containerName="nova-scheduler-scheduler" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.121968 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="45b5578b-5680-45af-b7e4-30860a0fa321" containerName="nova-scheduler-scheduler" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.121999 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="810a04af-eb91-4371-8b43-a1b733bef247" containerName="dnsmasq-dns" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.123111 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.126489 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.133787 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.269643 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9pt7\" (UniqueName: \"kubernetes.io/projected/586695f2-0f27-485f-a053-f8210a83f4c6-kube-api-access-g9pt7\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.269814 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.269854 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.371612 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9pt7\" (UniqueName: \"kubernetes.io/projected/586695f2-0f27-485f-a053-f8210a83f4c6-kube-api-access-g9pt7\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.371736 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.371764 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.379239 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.379507 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.391614 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9pt7\" (UniqueName: 
\"kubernetes.io/projected/586695f2-0f27-485f-a053-f8210a83f4c6-kube-api-access-g9pt7\") pod \"nova-scheduler-0\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.445683 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.700890 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45b5578b-5680-45af-b7e4-30860a0fa321" path="/var/lib/kubelet/pods/45b5578b-5680-45af-b7e4-30860a0fa321/volumes" Nov 28 16:56:42 crc kubenswrapper[4884]: I1128 16:56:42.879501 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:42 crc kubenswrapper[4884]: W1128 16:56:42.880012 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod586695f2_0f27_485f_a053_f8210a83f4c6.slice/crio-d9c0d580642a5aa563adddb902a7b99b58078c8b9690c362caa13282389dd31b WatchSource:0}: Error finding container d9c0d580642a5aa563adddb902a7b99b58078c8b9690c362caa13282389dd31b: Status 404 returned error can't find the container with id d9c0d580642a5aa563adddb902a7b99b58078c8b9690c362caa13282389dd31b Nov 28 16:56:43 crc kubenswrapper[4884]: I1128 16:56:43.046631 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586695f2-0f27-485f-a053-f8210a83f4c6","Type":"ContainerStarted","Data":"d9c0d580642a5aa563adddb902a7b99b58078c8b9690c362caa13282389dd31b"} Nov 28 16:56:43 crc kubenswrapper[4884]: I1128 16:56:43.448032 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:56:43 crc kubenswrapper[4884]: I1128 16:56:43.448511 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:56:44 crc kubenswrapper[4884]: I1128 16:56:44.056422 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586695f2-0f27-485f-a053-f8210a83f4c6","Type":"ContainerStarted","Data":"436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea"} Nov 28 16:56:44 crc kubenswrapper[4884]: I1128 16:56:44.078589 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.078570592 podStartE2EDuration="2.078570592s" podCreationTimestamp="2025-11-28 16:56:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:44.074156344 +0000 UTC m=+5843.636940165" watchObservedRunningTime="2025-11-28 16:56:44.078570592 +0000 UTC m=+5843.641354393" Nov 28 16:56:45 crc kubenswrapper[4884]: I1128 16:56:45.688677 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:56:45 crc kubenswrapper[4884]: E1128 16:56:45.689239 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.373437 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.446762 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.799678 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-r9nwp"] Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.800966 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.803405 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.803582 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.809571 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-r9nwp"] Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.878649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-config-data\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.878788 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.878828 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn7hr\" (UniqueName: \"kubernetes.io/projected/283b5474-9682-4dbb-a66e-0c6b39b23398-kube-api-access-xn7hr\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.878869 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-scripts\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.980843 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.980906 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn7hr\" (UniqueName: \"kubernetes.io/projected/283b5474-9682-4dbb-a66e-0c6b39b23398-kube-api-access-xn7hr\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " 
pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.980962 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-scripts\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.980990 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-config-data\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:47 crc kubenswrapper[4884]: I1128 16:56:47.991906 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.002629 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-scripts\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.004006 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-config-data\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.007973 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn7hr\" (UniqueName: \"kubernetes.io/projected/283b5474-9682-4dbb-a66e-0c6b39b23398-kube-api-access-xn7hr\") pod \"nova-cell1-cell-mapping-r9nwp\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.122779 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.410176 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.410547 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.447474 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.447515 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:56:48 crc kubenswrapper[4884]: I1128 16:56:48.588406 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-r9nwp"] Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.109318 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r9nwp" event={"ID":"283b5474-9682-4dbb-a66e-0c6b39b23398","Type":"ContainerStarted","Data":"9f5ec90f5f859ec359b101e0c818481f80cc6a01695cb78f6fc5e511e74d3669"} Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.109682 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r9nwp" event={"ID":"283b5474-9682-4dbb-a66e-0c6b39b23398","Type":"ContainerStarted","Data":"ab4bfe6fcb566963e443a6c6bda0ef7622a599b66e0afab594ca2e117608a57d"} Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.158388 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-r9nwp" podStartSLOduration=2.1583689 podStartE2EDuration="2.1583689s" podCreationTimestamp="2025-11-28 16:56:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:56:49.156271879 +0000 UTC m=+5848.719055680" watchObservedRunningTime="2025-11-28 16:56:49.1583689 +0000 UTC m=+5848.721152711" Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.576329 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.67:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.576315 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.66:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.576732 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.66:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:56:49 crc kubenswrapper[4884]: I1128 16:56:49.576774 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.67:8775/\": context deadline exceeded (Client.Timeout exceeded while 
awaiting headers)" Nov 28 16:56:52 crc kubenswrapper[4884]: I1128 16:56:52.446599 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:56:52 crc kubenswrapper[4884]: I1128 16:56:52.473647 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:56:53 crc kubenswrapper[4884]: I1128 16:56:53.184215 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:56:54 crc kubenswrapper[4884]: I1128 16:56:54.152237 4884 generic.go:334] "Generic (PLEG): container finished" podID="283b5474-9682-4dbb-a66e-0c6b39b23398" containerID="9f5ec90f5f859ec359b101e0c818481f80cc6a01695cb78f6fc5e511e74d3669" exitCode=0 Nov 28 16:56:54 crc kubenswrapper[4884]: I1128 16:56:54.152361 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r9nwp" event={"ID":"283b5474-9682-4dbb-a66e-0c6b39b23398","Type":"ContainerDied","Data":"9f5ec90f5f859ec359b101e0c818481f80cc6a01695cb78f6fc5e511e74d3669"} Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.532063 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.614172 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn7hr\" (UniqueName: \"kubernetes.io/projected/283b5474-9682-4dbb-a66e-0c6b39b23398-kube-api-access-xn7hr\") pod \"283b5474-9682-4dbb-a66e-0c6b39b23398\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.614340 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-combined-ca-bundle\") pod \"283b5474-9682-4dbb-a66e-0c6b39b23398\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.614396 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-config-data\") pod \"283b5474-9682-4dbb-a66e-0c6b39b23398\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.614432 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-scripts\") pod \"283b5474-9682-4dbb-a66e-0c6b39b23398\" (UID: \"283b5474-9682-4dbb-a66e-0c6b39b23398\") " Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.619881 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-scripts" (OuterVolumeSpecName: "scripts") pod "283b5474-9682-4dbb-a66e-0c6b39b23398" (UID: "283b5474-9682-4dbb-a66e-0c6b39b23398"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.619893 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/283b5474-9682-4dbb-a66e-0c6b39b23398-kube-api-access-xn7hr" (OuterVolumeSpecName: "kube-api-access-xn7hr") pod "283b5474-9682-4dbb-a66e-0c6b39b23398" (UID: "283b5474-9682-4dbb-a66e-0c6b39b23398"). InnerVolumeSpecName "kube-api-access-xn7hr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.645449 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "283b5474-9682-4dbb-a66e-0c6b39b23398" (UID: "283b5474-9682-4dbb-a66e-0c6b39b23398"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.646231 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-config-data" (OuterVolumeSpecName: "config-data") pod "283b5474-9682-4dbb-a66e-0c6b39b23398" (UID: "283b5474-9682-4dbb-a66e-0c6b39b23398"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.716035 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.716067 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.716075 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/283b5474-9682-4dbb-a66e-0c6b39b23398-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:55 crc kubenswrapper[4884]: I1128 16:56:55.716104 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn7hr\" (UniqueName: \"kubernetes.io/projected/283b5474-9682-4dbb-a66e-0c6b39b23398-kube-api-access-xn7hr\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.172148 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-r9nwp" event={"ID":"283b5474-9682-4dbb-a66e-0c6b39b23398","Type":"ContainerDied","Data":"ab4bfe6fcb566963e443a6c6bda0ef7622a599b66e0afab594ca2e117608a57d"} Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.172454 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab4bfe6fcb566963e443a6c6bda0ef7622a599b66e0afab594ca2e117608a57d" Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.172251 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-r9nwp" Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.357712 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.357988 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-log" containerID="cri-o://3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0" gracePeriod=30 Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.358516 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-api" containerID="cri-o://68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96" gracePeriod=30 Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.369235 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.369469 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="586695f2-0f27-485f-a053-f8210a83f4c6" containerName="nova-scheduler-scheduler" containerID="cri-o://436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea" gracePeriod=30 Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.384372 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.384614 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-log" containerID="cri-o://52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277" gracePeriod=30 Nov 28 16:56:56 crc kubenswrapper[4884]: I1128 16:56:56.384722 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-metadata" containerID="cri-o://622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b" gracePeriod=30 Nov 28 16:56:57 crc kubenswrapper[4884]: I1128 16:56:57.184237 4884 generic.go:334] "Generic (PLEG): container finished" podID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerID="3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0" exitCode=143 Nov 28 16:56:57 crc kubenswrapper[4884]: I1128 16:56:57.184313 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8d223366-f9e0-4773-b7fc-55a246f05d88","Type":"ContainerDied","Data":"3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0"} Nov 28 16:56:57 crc kubenswrapper[4884]: I1128 16:56:57.186388 4884 generic.go:334] "Generic (PLEG): container finished" podID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerID="52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277" exitCode=143 Nov 28 16:56:57 crc kubenswrapper[4884]: I1128 16:56:57.186410 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6cb0238a-09e2-46d3-b052-0520f4c12307","Type":"ContainerDied","Data":"52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277"} Nov 28 16:56:57 crc kubenswrapper[4884]: E1128 16:56:57.449851 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command 
error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:56:57 crc kubenswrapper[4884]: E1128 16:56:57.451243 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:56:57 crc kubenswrapper[4884]: E1128 16:56:57.458876 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:56:57 crc kubenswrapper[4884]: E1128 16:56:57.458986 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="586695f2-0f27-485f-a053-f8210a83f4c6" containerName="nova-scheduler-scheduler" Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.204821 4884 generic.go:334] "Generic (PLEG): container finished" podID="586695f2-0f27-485f-a053-f8210a83f4c6" containerID="436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea" exitCode=0 Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.204989 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586695f2-0f27-485f-a053-f8210a83f4c6","Type":"ContainerDied","Data":"436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea"} Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.688995 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:56:59 crc kubenswrapper[4884]: E1128 16:56:59.689721 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.754709 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.888499 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-combined-ca-bundle\") pod \"586695f2-0f27-485f-a053-f8210a83f4c6\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.888605 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data\") pod \"586695f2-0f27-485f-a053-f8210a83f4c6\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.888702 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9pt7\" (UniqueName: \"kubernetes.io/projected/586695f2-0f27-485f-a053-f8210a83f4c6-kube-api-access-g9pt7\") pod \"586695f2-0f27-485f-a053-f8210a83f4c6\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.894419 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/586695f2-0f27-485f-a053-f8210a83f4c6-kube-api-access-g9pt7" (OuterVolumeSpecName: "kube-api-access-g9pt7") pod "586695f2-0f27-485f-a053-f8210a83f4c6" (UID: "586695f2-0f27-485f-a053-f8210a83f4c6"). InnerVolumeSpecName "kube-api-access-g9pt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:56:59 crc kubenswrapper[4884]: E1128 16:56:59.941901 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data podName:586695f2-0f27-485f-a053-f8210a83f4c6 nodeName:}" failed. No retries permitted until 2025-11-28 16:57:00.441871935 +0000 UTC m=+5860.004655736 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data") pod "586695f2-0f27-485f-a053-f8210a83f4c6" (UID: "586695f2-0f27-485f-a053-f8210a83f4c6") : error deleting /var/lib/kubelet/pods/586695f2-0f27-485f-a053-f8210a83f4c6/volume-subpaths: remove /var/lib/kubelet/pods/586695f2-0f27-485f-a053-f8210a83f4c6/volume-subpaths: no such file or directory Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.947656 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "586695f2-0f27-485f-a053-f8210a83f4c6" (UID: "586695f2-0f27-485f-a053-f8210a83f4c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.957309 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.992420 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:56:59 crc kubenswrapper[4884]: I1128 16:56:59.992457 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9pt7\" (UniqueName: \"kubernetes.io/projected/586695f2-0f27-485f-a053-f8210a83f4c6-kube-api-access-g9pt7\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.025261 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093324 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld9s8\" (UniqueName: \"kubernetes.io/projected/8d223366-f9e0-4773-b7fc-55a246f05d88-kube-api-access-ld9s8\") pod \"8d223366-f9e0-4773-b7fc-55a246f05d88\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093451 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgfrm\" (UniqueName: \"kubernetes.io/projected/6cb0238a-09e2-46d3-b052-0520f4c12307-kube-api-access-xgfrm\") pod \"6cb0238a-09e2-46d3-b052-0520f4c12307\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093517 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d223366-f9e0-4773-b7fc-55a246f05d88-logs\") pod \"8d223366-f9e0-4773-b7fc-55a246f05d88\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093598 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-config-data\") pod \"8d223366-f9e0-4773-b7fc-55a246f05d88\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093680 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-combined-ca-bundle\") pod \"6cb0238a-09e2-46d3-b052-0520f4c12307\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093722 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cb0238a-09e2-46d3-b052-0520f4c12307-logs\") pod \"6cb0238a-09e2-46d3-b052-0520f4c12307\" (UID: \"6cb0238a-09e2-46d3-b052-0520f4c12307\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093769 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-combined-ca-bundle\") pod \"8d223366-f9e0-4773-b7fc-55a246f05d88\" (UID: \"8d223366-f9e0-4773-b7fc-55a246f05d88\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.093962 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-config-data\") pod \"6cb0238a-09e2-46d3-b052-0520f4c12307\" (UID: 
\"6cb0238a-09e2-46d3-b052-0520f4c12307\") " Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.095912 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cb0238a-09e2-46d3-b052-0520f4c12307-logs" (OuterVolumeSpecName: "logs") pod "6cb0238a-09e2-46d3-b052-0520f4c12307" (UID: "6cb0238a-09e2-46d3-b052-0520f4c12307"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.096659 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d223366-f9e0-4773-b7fc-55a246f05d88-logs" (OuterVolumeSpecName: "logs") pod "8d223366-f9e0-4773-b7fc-55a246f05d88" (UID: "8d223366-f9e0-4773-b7fc-55a246f05d88"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.100013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d223366-f9e0-4773-b7fc-55a246f05d88-kube-api-access-ld9s8" (OuterVolumeSpecName: "kube-api-access-ld9s8") pod "8d223366-f9e0-4773-b7fc-55a246f05d88" (UID: "8d223366-f9e0-4773-b7fc-55a246f05d88"). InnerVolumeSpecName "kube-api-access-ld9s8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.100310 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cb0238a-09e2-46d3-b052-0520f4c12307-kube-api-access-xgfrm" (OuterVolumeSpecName: "kube-api-access-xgfrm") pod "6cb0238a-09e2-46d3-b052-0520f4c12307" (UID: "6cb0238a-09e2-46d3-b052-0520f4c12307"). InnerVolumeSpecName "kube-api-access-xgfrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.121518 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cb0238a-09e2-46d3-b052-0520f4c12307" (UID: "6cb0238a-09e2-46d3-b052-0520f4c12307"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.121772 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-config-data" (OuterVolumeSpecName: "config-data") pod "6cb0238a-09e2-46d3-b052-0520f4c12307" (UID: "6cb0238a-09e2-46d3-b052-0520f4c12307"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.123006 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-config-data" (OuterVolumeSpecName: "config-data") pod "8d223366-f9e0-4773-b7fc-55a246f05d88" (UID: "8d223366-f9e0-4773-b7fc-55a246f05d88"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.123589 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d223366-f9e0-4773-b7fc-55a246f05d88" (UID: "8d223366-f9e0-4773-b7fc-55a246f05d88"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196770 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6cb0238a-09e2-46d3-b052-0520f4c12307-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196814 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196828 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196841 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld9s8\" (UniqueName: \"kubernetes.io/projected/8d223366-f9e0-4773-b7fc-55a246f05d88-kube-api-access-ld9s8\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196857 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgfrm\" (UniqueName: \"kubernetes.io/projected/6cb0238a-09e2-46d3-b052-0520f4c12307-kube-api-access-xgfrm\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196870 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8d223366-f9e0-4773-b7fc-55a246f05d88-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196881 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d223366-f9e0-4773-b7fc-55a246f05d88-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.196892 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb0238a-09e2-46d3-b052-0520f4c12307-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.216671 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"586695f2-0f27-485f-a053-f8210a83f4c6","Type":"ContainerDied","Data":"d9c0d580642a5aa563adddb902a7b99b58078c8b9690c362caa13282389dd31b"} Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.216748 4884 scope.go:117] "RemoveContainer" containerID="436ba1ddd5b3341d99ec9d3390a0ee1c29ca356a5a0294638e1d302fc23416ea" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.216691 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.221851 4884 generic.go:334] "Generic (PLEG): container finished" podID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerID="68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96" exitCode=0 Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.221923 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.221946 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8d223366-f9e0-4773-b7fc-55a246f05d88","Type":"ContainerDied","Data":"68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96"} Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.222010 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8d223366-f9e0-4773-b7fc-55a246f05d88","Type":"ContainerDied","Data":"99d62a76cbbff2ef4f776ee3be592e3e7df70731c977c079aef92d22838f4059"} Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.234353 4884 generic.go:334] "Generic (PLEG): container finished" podID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerID="622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b" exitCode=0 Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.234395 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6cb0238a-09e2-46d3-b052-0520f4c12307","Type":"ContainerDied","Data":"622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b"} Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.234425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6cb0238a-09e2-46d3-b052-0520f4c12307","Type":"ContainerDied","Data":"e38df9655a6b48ac89ab050c34dfa06f4e96ecf91d1d7b766b519857f147eb6e"} Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.234492 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.297767 4884 scope.go:117] "RemoveContainer" containerID="68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.316655 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.324156 4884 scope.go:117] "RemoveContainer" containerID="3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.327392 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.336510 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.358267 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.365393 4884 scope.go:117] "RemoveContainer" containerID="68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368157 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.368626 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="283b5474-9682-4dbb-a66e-0c6b39b23398" containerName="nova-manage" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368650 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="283b5474-9682-4dbb-a66e-0c6b39b23398" containerName="nova-manage" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.368661 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-api" Nov 28 
16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368670 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-api" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.368683 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-log" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368691 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-log" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.368711 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-log" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368718 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-log" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.368737 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-metadata" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368744 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-metadata" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.368775 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="586695f2-0f27-485f-a053-f8210a83f4c6" containerName="nova-scheduler-scheduler" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368783 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="586695f2-0f27-485f-a053-f8210a83f4c6" containerName="nova-scheduler-scheduler" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.368988 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-api" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.369005 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="283b5474-9682-4dbb-a66e-0c6b39b23398" containerName="nova-manage" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.369021 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-metadata" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.369028 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="586695f2-0f27-485f-a053-f8210a83f4c6" containerName="nova-scheduler-scheduler" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.369050 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" containerName="nova-api-log" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.369062 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" containerName="nova-metadata-log" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.370347 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.373192 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96\": container with ID starting with 68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96 not found: ID does not exist" containerID="68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.373244 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96"} err="failed to get container status \"68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96\": rpc error: code = NotFound desc = could not find container \"68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96\": container with ID starting with 68bd50f46c195e7b28ffd77422aada88f028db0c27a5c82dee2857638f7e1d96 not found: ID does not exist" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.373274 4884 scope.go:117] "RemoveContainer" containerID="3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.373391 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.374179 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0\": container with ID starting with 3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0 not found: ID does not exist" containerID="3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.374219 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0"} err="failed to get container status \"3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0\": rpc error: code = NotFound desc = could not find container \"3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0\": container with ID starting with 3f4d9204e92ee06ae6c71f8d2d378ed928eb94fa87f5e5a755d2b68d152549b0 not found: ID does not exist" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.374290 4884 scope.go:117] "RemoveContainer" containerID="622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.392491 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.393972 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.395955 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.409802 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.418559 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.435213 4884 scope.go:117] "RemoveContainer" containerID="52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.457795 4884 scope.go:117] "RemoveContainer" containerID="622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.459538 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b\": container with ID starting with 622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b not found: ID does not exist" containerID="622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.459588 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b"} err="failed to get container status \"622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b\": rpc error: code = NotFound desc = could not find container \"622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b\": container with ID starting with 622712e690cc0c07890833194db5593faad18cdbbf1860fab20f9ece553df44b not found: ID does not exist" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.459628 4884 scope.go:117] "RemoveContainer" containerID="52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277" Nov 28 16:57:00 crc kubenswrapper[4884]: E1128 16:57:00.460065 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277\": container with ID starting with 52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277 not found: ID does not exist" containerID="52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.460157 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277"} err="failed to get container status \"52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277\": rpc error: code = NotFound desc = could not find container \"52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277\": container with ID starting with 52aa5fa283d115d74116b4266263eeadf048caa417c52b1de1c0f4e0f54af277 not found: ID does not exist" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.504929 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data\") pod \"586695f2-0f27-485f-a053-f8210a83f4c6\" (UID: \"586695f2-0f27-485f-a053-f8210a83f4c6\") " Nov 28 16:57:00 crc kubenswrapper[4884]: 
I1128 16:57:00.505661 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/356a312a-4db2-4488-b4e4-6dffdc6640ac-logs\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.505721 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-config-data\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.505805 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-config-data\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.505843 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e374b20-e3f7-46ea-8489-3874c4778557-logs\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.505888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9j4x\" (UniqueName: \"kubernetes.io/projected/356a312a-4db2-4488-b4e4-6dffdc6640ac-kube-api-access-p9j4x\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.505929 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.505973 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lghjx\" (UniqueName: \"kubernetes.io/projected/8e374b20-e3f7-46ea-8489-3874c4778557-kube-api-access-lghjx\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.506055 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.508327 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data" (OuterVolumeSpecName: "config-data") pod "586695f2-0f27-485f-a053-f8210a83f4c6" (UID: "586695f2-0f27-485f-a053-f8210a83f4c6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.564913 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.573974 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.583959 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.598208 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.600522 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.604572 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.607687 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-config-data\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.607743 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e374b20-e3f7-46ea-8489-3874c4778557-logs\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.607790 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9j4x\" (UniqueName: \"kubernetes.io/projected/356a312a-4db2-4488-b4e4-6dffdc6640ac-kube-api-access-p9j4x\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.607831 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.607873 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lghjx\" (UniqueName: \"kubernetes.io/projected/8e374b20-e3f7-46ea-8489-3874c4778557-kube-api-access-lghjx\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.607944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.608007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/356a312a-4db2-4488-b4e4-6dffdc6640ac-logs\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 
16:57:00.608046 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-config-data\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.608340 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/586695f2-0f27-485f-a053-f8210a83f4c6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.608506 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e374b20-e3f7-46ea-8489-3874c4778557-logs\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.609349 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/356a312a-4db2-4488-b4e4-6dffdc6640ac-logs\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.613973 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-config-data\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.614006 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.615826 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.616163 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-config-data\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.629952 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9j4x\" (UniqueName: \"kubernetes.io/projected/356a312a-4db2-4488-b4e4-6dffdc6640ac-kube-api-access-p9j4x\") pod \"nova-metadata-0\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") " pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.633570 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lghjx\" (UniqueName: \"kubernetes.io/projected/8e374b20-e3f7-46ea-8489-3874c4778557-kube-api-access-lghjx\") pod \"nova-api-0\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.702551 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.705632 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="586695f2-0f27-485f-a053-f8210a83f4c6" path="/var/lib/kubelet/pods/586695f2-0f27-485f-a053-f8210a83f4c6/volumes" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.706337 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cb0238a-09e2-46d3-b052-0520f4c12307" path="/var/lib/kubelet/pods/6cb0238a-09e2-46d3-b052-0520f4c12307/volumes" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.707034 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d223366-f9e0-4773-b7fc-55a246f05d88" path="/var/lib/kubelet/pods/8d223366-f9e0-4773-b7fc-55a246f05d88/volumes" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.709458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw7g5\" (UniqueName: \"kubernetes.io/projected/9ca70283-cb6d-47ca-8039-a2ec6c4af560-kube-api-access-tw7g5\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.709520 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.709545 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-config-data\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.716782 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.811102 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw7g5\" (UniqueName: \"kubernetes.io/projected/9ca70283-cb6d-47ca-8039-a2ec6c4af560-kube-api-access-tw7g5\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.811474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.811497 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-config-data\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.816810 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.818591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-config-data\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:00 crc kubenswrapper[4884]: I1128 16:57:00.834821 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw7g5\" (UniqueName: \"kubernetes.io/projected/9ca70283-cb6d-47ca-8039-a2ec6c4af560-kube-api-access-tw7g5\") pod \"nova-scheduler-0\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") " pod="openstack/nova-scheduler-0" Nov 28 16:57:01 crc kubenswrapper[4884]: I1128 16:57:01.002561 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 16:57:01 crc kubenswrapper[4884]: I1128 16:57:01.124702 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:57:01 crc kubenswrapper[4884]: I1128 16:57:01.158819 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:57:01 crc kubenswrapper[4884]: W1128 16:57:01.173198 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e374b20_e3f7_46ea_8489_3874c4778557.slice/crio-2bc9db0723a2b5540e862749e776a34060bb778ce3491187769c646533dac39c WatchSource:0}: Error finding container 2bc9db0723a2b5540e862749e776a34060bb778ce3491187769c646533dac39c: Status 404 returned error can't find the container with id 2bc9db0723a2b5540e862749e776a34060bb778ce3491187769c646533dac39c Nov 28 16:57:01 crc kubenswrapper[4884]: I1128 16:57:01.276147 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8e374b20-e3f7-46ea-8489-3874c4778557","Type":"ContainerStarted","Data":"2bc9db0723a2b5540e862749e776a34060bb778ce3491187769c646533dac39c"} Nov 28 16:57:01 crc kubenswrapper[4884]: I1128 16:57:01.282521 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"356a312a-4db2-4488-b4e4-6dffdc6640ac","Type":"ContainerStarted","Data":"3835a9ae6208f37052ea8d9c54c942d9f7b41589215e8e2842b94b77a0cb63c2"} Nov 28 16:57:01 crc kubenswrapper[4884]: I1128 16:57:01.524322 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.294851 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8e374b20-e3f7-46ea-8489-3874c4778557","Type":"ContainerStarted","Data":"23fa4b8bcccab825e1fdc70642491bed705058d294a71a1edcfa31ff79db1320"} Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.295239 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8e374b20-e3f7-46ea-8489-3874c4778557","Type":"ContainerStarted","Data":"ec0dea92d0e99dc3cd9129a6395fb3b20e5f1893b20089b1a6960f34201514f7"} Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.300193 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"356a312a-4db2-4488-b4e4-6dffdc6640ac","Type":"ContainerStarted","Data":"5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb"} Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.300244 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"356a312a-4db2-4488-b4e4-6dffdc6640ac","Type":"ContainerStarted","Data":"e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4"} Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.302126 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9ca70283-cb6d-47ca-8039-a2ec6c4af560","Type":"ContainerStarted","Data":"8da3996f6b4c381e20c1561577bec6d68dd54693d7e39365e7ae84bb77814f01"} Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.302162 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9ca70283-cb6d-47ca-8039-a2ec6c4af560","Type":"ContainerStarted","Data":"60cf18df933e4259bc0c429946e55b5334d5afebe385708294bc6be4757bcd6b"} Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.323713 4884 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.3236910809999998 podStartE2EDuration="2.323691081s" podCreationTimestamp="2025-11-28 16:57:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:02.317407917 +0000 UTC m=+5861.880191728" watchObservedRunningTime="2025-11-28 16:57:02.323691081 +0000 UTC m=+5861.886474882" Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.348230 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.34821643 podStartE2EDuration="2.34821643s" podCreationTimestamp="2025-11-28 16:57:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:02.335612742 +0000 UTC m=+5861.898396543" watchObservedRunningTime="2025-11-28 16:57:02.34821643 +0000 UTC m=+5861.911000391" Nov 28 16:57:02 crc kubenswrapper[4884]: I1128 16:57:02.358503 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.358484522 podStartE2EDuration="2.358484522s" podCreationTimestamp="2025-11-28 16:57:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:02.357816315 +0000 UTC m=+5861.920600126" watchObservedRunningTime="2025-11-28 16:57:02.358484522 +0000 UTC m=+5861.921268333" Nov 28 16:57:05 crc kubenswrapper[4884]: I1128 16:57:05.703185 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:57:05 crc kubenswrapper[4884]: I1128 16:57:05.703752 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:57:06 crc kubenswrapper[4884]: I1128 16:57:06.003545 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:57:10 crc kubenswrapper[4884]: I1128 16:57:10.702850 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:57:10 crc kubenswrapper[4884]: I1128 16:57:10.703377 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:57:10 crc kubenswrapper[4884]: I1128 16:57:10.717556 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:57:10 crc kubenswrapper[4884]: I1128 16:57:10.717866 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.003346 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.035852 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.409414 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.868248 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": context 
deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.868286 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.71:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.868326 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:57:11 crc kubenswrapper[4884]: I1128 16:57:11.868343 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.71:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:57:13 crc kubenswrapper[4884]: I1128 16:57:13.688618 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:57:13 crc kubenswrapper[4884]: E1128 16:57:13.690176 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.712724 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.715800 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.717370 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.722912 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.723350 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.725214 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 16:57:20 crc kubenswrapper[4884]: I1128 16:57:20.728440 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.468861 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.471963 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.473216 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.754935 4884 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-794688c677-msx6x"] Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.756464 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.776616 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-794688c677-msx6x"] Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.807683 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp9jv\" (UniqueName: \"kubernetes.io/projected/177f87bf-3e45-4455-947e-e40d0ec1b251-kube-api-access-bp9jv\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.807754 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-nb\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.807784 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-dns-svc\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.807799 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-sb\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.807838 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-config\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.910041 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp9jv\" (UniqueName: \"kubernetes.io/projected/177f87bf-3e45-4455-947e-e40d0ec1b251-kube-api-access-bp9jv\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.910148 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-nb\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.910217 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-dns-svc\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: 
\"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.910267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-sb\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.910316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-config\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.911453 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-dns-svc\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.911499 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-nb\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.911577 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-config\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.911936 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-sb\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:21 crc kubenswrapper[4884]: I1128 16:57:21.929050 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp9jv\" (UniqueName: \"kubernetes.io/projected/177f87bf-3e45-4455-947e-e40d0ec1b251-kube-api-access-bp9jv\") pod \"dnsmasq-dns-794688c677-msx6x\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") " pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:22 crc kubenswrapper[4884]: I1128 16:57:22.084969 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:22 crc kubenswrapper[4884]: I1128 16:57:22.545816 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-794688c677-msx6x"] Nov 28 16:57:23 crc kubenswrapper[4884]: I1128 16:57:23.487639 4884 generic.go:334] "Generic (PLEG): container finished" podID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerID="d5e40fc8a2307c6b7a94e169c1da6bdd582baaf7b95086a889ceab451c89faf2" exitCode=0 Nov 28 16:57:23 crc kubenswrapper[4884]: I1128 16:57:23.487741 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794688c677-msx6x" event={"ID":"177f87bf-3e45-4455-947e-e40d0ec1b251","Type":"ContainerDied","Data":"d5e40fc8a2307c6b7a94e169c1da6bdd582baaf7b95086a889ceab451c89faf2"} Nov 28 16:57:23 crc kubenswrapper[4884]: I1128 16:57:23.488116 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794688c677-msx6x" event={"ID":"177f87bf-3e45-4455-947e-e40d0ec1b251","Type":"ContainerStarted","Data":"b5cb52f00deb26cac2e5041f55f8cfda2d3789536625d5990229bf0d42b2839b"} Nov 28 16:57:24 crc kubenswrapper[4884]: I1128 16:57:24.497167 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794688c677-msx6x" event={"ID":"177f87bf-3e45-4455-947e-e40d0ec1b251","Type":"ContainerStarted","Data":"906d5fbe19f4797529b32fffa4c0d001766cb787ae43969e3b2ec74d3c2c0392"} Nov 28 16:57:24 crc kubenswrapper[4884]: I1128 16:57:24.497679 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:24 crc kubenswrapper[4884]: I1128 16:57:24.521826 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-794688c677-msx6x" podStartSLOduration=3.521810906 podStartE2EDuration="3.521810906s" podCreationTimestamp="2025-11-28 16:57:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:24.520269228 +0000 UTC m=+5884.083053029" watchObservedRunningTime="2025-11-28 16:57:24.521810906 +0000 UTC m=+5884.084594707" Nov 28 16:57:24 crc kubenswrapper[4884]: I1128 16:57:24.689865 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:57:24 crc kubenswrapper[4884]: E1128 16:57:24.690082 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.087461 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-794688c677-msx6x" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.161487 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b56bc7bfc-v7njx"] Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.161709 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerName="dnsmasq-dns" containerID="cri-o://9b9496870baeb91518dbeccea8a731f1a5a576163cb7b1f53a4e87f06bf713d3" 
gracePeriod=10 Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.581872 4884 generic.go:334] "Generic (PLEG): container finished" podID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerID="9b9496870baeb91518dbeccea8a731f1a5a576163cb7b1f53a4e87f06bf713d3" exitCode=0 Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.581978 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" event={"ID":"aa810bba-2b91-426f-8ab9-6ce129794e16","Type":"ContainerDied","Data":"9b9496870baeb91518dbeccea8a731f1a5a576163cb7b1f53a4e87f06bf713d3"} Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.582301 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" event={"ID":"aa810bba-2b91-426f-8ab9-6ce129794e16","Type":"ContainerDied","Data":"7b83c0f243832858fb47a4170d7a38ce3b33c78a6ec75c01776b6cefbbaa6a83"} Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.582318 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b83c0f243832858fb47a4170d7a38ce3b33c78a6ec75c01776b6cefbbaa6a83" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.664278 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.815677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9twxh\" (UniqueName: \"kubernetes.io/projected/aa810bba-2b91-426f-8ab9-6ce129794e16-kube-api-access-9twxh\") pod \"aa810bba-2b91-426f-8ab9-6ce129794e16\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.815778 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-sb\") pod \"aa810bba-2b91-426f-8ab9-6ce129794e16\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.815830 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-nb\") pod \"aa810bba-2b91-426f-8ab9-6ce129794e16\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.815908 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-dns-svc\") pod \"aa810bba-2b91-426f-8ab9-6ce129794e16\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.815954 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-config\") pod \"aa810bba-2b91-426f-8ab9-6ce129794e16\" (UID: \"aa810bba-2b91-426f-8ab9-6ce129794e16\") " Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.833713 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa810bba-2b91-426f-8ab9-6ce129794e16-kube-api-access-9twxh" (OuterVolumeSpecName: "kube-api-access-9twxh") pod "aa810bba-2b91-426f-8ab9-6ce129794e16" (UID: "aa810bba-2b91-426f-8ab9-6ce129794e16"). InnerVolumeSpecName "kube-api-access-9twxh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.916967 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-config" (OuterVolumeSpecName: "config") pod "aa810bba-2b91-426f-8ab9-6ce129794e16" (UID: "aa810bba-2b91-426f-8ab9-6ce129794e16"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.918164 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9twxh\" (UniqueName: \"kubernetes.io/projected/aa810bba-2b91-426f-8ab9-6ce129794e16-kube-api-access-9twxh\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.918185 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-config\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.922582 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aa810bba-2b91-426f-8ab9-6ce129794e16" (UID: "aa810bba-2b91-426f-8ab9-6ce129794e16"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.941646 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa810bba-2b91-426f-8ab9-6ce129794e16" (UID: "aa810bba-2b91-426f-8ab9-6ce129794e16"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:57:32 crc kubenswrapper[4884]: I1128 16:57:32.947888 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aa810bba-2b91-426f-8ab9-6ce129794e16" (UID: "aa810bba-2b91-426f-8ab9-6ce129794e16"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.020459 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.020507 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.020521 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa810bba-2b91-426f-8ab9-6ce129794e16-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.589533 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b56bc7bfc-v7njx" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.627474 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b56bc7bfc-v7njx"] Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.641454 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b56bc7bfc-v7njx"] Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.879427 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-lw9qd"] Nov 28 16:57:33 crc kubenswrapper[4884]: E1128 16:57:33.879796 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerName="dnsmasq-dns" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.879813 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerName="dnsmasq-dns" Nov 28 16:57:33 crc kubenswrapper[4884]: E1128 16:57:33.879841 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerName="init" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.879848 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerName="init" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.880018 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" containerName="dnsmasq-dns" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.880687 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:33 crc kubenswrapper[4884]: I1128 16:57:33.891082 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-lw9qd"] Nov 28 16:57:34 crc kubenswrapper[4884]: I1128 16:57:34.036719 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk8fd\" (UniqueName: \"kubernetes.io/projected/f92a45a6-8ede-457a-bc9b-9364ed480d25-kube-api-access-wk8fd\") pod \"cinder-db-create-lw9qd\" (UID: \"f92a45a6-8ede-457a-bc9b-9364ed480d25\") " pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:34 crc kubenswrapper[4884]: I1128 16:57:34.138202 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk8fd\" (UniqueName: \"kubernetes.io/projected/f92a45a6-8ede-457a-bc9b-9364ed480d25-kube-api-access-wk8fd\") pod \"cinder-db-create-lw9qd\" (UID: \"f92a45a6-8ede-457a-bc9b-9364ed480d25\") " pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:34 crc kubenswrapper[4884]: I1128 16:57:34.158237 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk8fd\" (UniqueName: \"kubernetes.io/projected/f92a45a6-8ede-457a-bc9b-9364ed480d25-kube-api-access-wk8fd\") pod \"cinder-db-create-lw9qd\" (UID: \"f92a45a6-8ede-457a-bc9b-9364ed480d25\") " pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:34 crc kubenswrapper[4884]: I1128 16:57:34.207528 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:34 crc kubenswrapper[4884]: I1128 16:57:34.673989 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-lw9qd"] Nov 28 16:57:34 crc kubenswrapper[4884]: I1128 16:57:34.698474 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa810bba-2b91-426f-8ab9-6ce129794e16" path="/var/lib/kubelet/pods/aa810bba-2b91-426f-8ab9-6ce129794e16/volumes" Nov 28 16:57:35 crc kubenswrapper[4884]: I1128 16:57:35.607241 4884 generic.go:334] "Generic (PLEG): container finished" podID="f92a45a6-8ede-457a-bc9b-9364ed480d25" containerID="8ce412ae0f0c16f54c8c804e892361de64e404155e36f25b0aec81f4ac522b5e" exitCode=0 Nov 28 16:57:35 crc kubenswrapper[4884]: I1128 16:57:35.607305 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lw9qd" event={"ID":"f92a45a6-8ede-457a-bc9b-9364ed480d25","Type":"ContainerDied","Data":"8ce412ae0f0c16f54c8c804e892361de64e404155e36f25b0aec81f4ac522b5e"} Nov 28 16:57:35 crc kubenswrapper[4884]: I1128 16:57:35.607847 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lw9qd" event={"ID":"f92a45a6-8ede-457a-bc9b-9364ed480d25","Type":"ContainerStarted","Data":"15940b7ef387c61dd425efc97ecc8f900bd754948613a6cd5dd36a18229c20e8"} Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.004547 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.201858 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk8fd\" (UniqueName: \"kubernetes.io/projected/f92a45a6-8ede-457a-bc9b-9364ed480d25-kube-api-access-wk8fd\") pod \"f92a45a6-8ede-457a-bc9b-9364ed480d25\" (UID: \"f92a45a6-8ede-457a-bc9b-9364ed480d25\") " Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.209801 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f92a45a6-8ede-457a-bc9b-9364ed480d25-kube-api-access-wk8fd" (OuterVolumeSpecName: "kube-api-access-wk8fd") pod "f92a45a6-8ede-457a-bc9b-9364ed480d25" (UID: "f92a45a6-8ede-457a-bc9b-9364ed480d25"). InnerVolumeSpecName "kube-api-access-wk8fd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.304225 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk8fd\" (UniqueName: \"kubernetes.io/projected/f92a45a6-8ede-457a-bc9b-9364ed480d25-kube-api-access-wk8fd\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.634253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-lw9qd" event={"ID":"f92a45a6-8ede-457a-bc9b-9364ed480d25","Type":"ContainerDied","Data":"15940b7ef387c61dd425efc97ecc8f900bd754948613a6cd5dd36a18229c20e8"} Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.634304 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15940b7ef387c61dd425efc97ecc8f900bd754948613a6cd5dd36a18229c20e8" Nov 28 16:57:37 crc kubenswrapper[4884]: I1128 16:57:37.634376 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-lw9qd" Nov 28 16:57:38 crc kubenswrapper[4884]: I1128 16:57:38.689411 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:57:38 crc kubenswrapper[4884]: E1128 16:57:38.689680 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:57:43 crc kubenswrapper[4884]: I1128 16:57:43.982592 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5f1f-account-create-fgnw4"] Nov 28 16:57:43 crc kubenswrapper[4884]: E1128 16:57:43.983801 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f92a45a6-8ede-457a-bc9b-9364ed480d25" containerName="mariadb-database-create" Nov 28 16:57:43 crc kubenswrapper[4884]: I1128 16:57:43.983823 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f92a45a6-8ede-457a-bc9b-9364ed480d25" containerName="mariadb-database-create" Nov 28 16:57:43 crc kubenswrapper[4884]: I1128 16:57:43.984977 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f92a45a6-8ede-457a-bc9b-9364ed480d25" containerName="mariadb-database-create" Nov 28 16:57:43 crc kubenswrapper[4884]: I1128 16:57:43.985853 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:43 crc kubenswrapper[4884]: I1128 16:57:43.988857 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 16:57:44 crc kubenswrapper[4884]: I1128 16:57:44.009134 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5f1f-account-create-fgnw4"] Nov 28 16:57:44 crc kubenswrapper[4884]: I1128 16:57:44.119145 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6b69\" (UniqueName: \"kubernetes.io/projected/e329d7c7-53b2-4a02-9119-ca75c0d423fd-kube-api-access-n6b69\") pod \"cinder-5f1f-account-create-fgnw4\" (UID: \"e329d7c7-53b2-4a02-9119-ca75c0d423fd\") " pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:44 crc kubenswrapper[4884]: I1128 16:57:44.220392 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6b69\" (UniqueName: \"kubernetes.io/projected/e329d7c7-53b2-4a02-9119-ca75c0d423fd-kube-api-access-n6b69\") pod \"cinder-5f1f-account-create-fgnw4\" (UID: \"e329d7c7-53b2-4a02-9119-ca75c0d423fd\") " pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:44 crc kubenswrapper[4884]: I1128 16:57:44.245201 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6b69\" (UniqueName: \"kubernetes.io/projected/e329d7c7-53b2-4a02-9119-ca75c0d423fd-kube-api-access-n6b69\") pod \"cinder-5f1f-account-create-fgnw4\" (UID: \"e329d7c7-53b2-4a02-9119-ca75c0d423fd\") " pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:44 crc kubenswrapper[4884]: I1128 16:57:44.305323 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:44 crc kubenswrapper[4884]: I1128 16:57:44.725523 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5f1f-account-create-fgnw4"] Nov 28 16:57:45 crc kubenswrapper[4884]: I1128 16:57:45.706820 4884 generic.go:334] "Generic (PLEG): container finished" podID="e329d7c7-53b2-4a02-9119-ca75c0d423fd" containerID="fc20a08b5191c7333e5a3e858e4d4d8a582b962b059464136ee62f3a10659758" exitCode=0 Nov 28 16:57:45 crc kubenswrapper[4884]: I1128 16:57:45.706880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5f1f-account-create-fgnw4" event={"ID":"e329d7c7-53b2-4a02-9119-ca75c0d423fd","Type":"ContainerDied","Data":"fc20a08b5191c7333e5a3e858e4d4d8a582b962b059464136ee62f3a10659758"} Nov 28 16:57:45 crc kubenswrapper[4884]: I1128 16:57:45.706917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5f1f-account-create-fgnw4" event={"ID":"e329d7c7-53b2-4a02-9119-ca75c0d423fd","Type":"ContainerStarted","Data":"7e8ff1bfc46c00e89f37a56fb07d099fb2e07e1b2655f508303e480395e208c3"} Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.047377 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.217235 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6b69\" (UniqueName: \"kubernetes.io/projected/e329d7c7-53b2-4a02-9119-ca75c0d423fd-kube-api-access-n6b69\") pod \"e329d7c7-53b2-4a02-9119-ca75c0d423fd\" (UID: \"e329d7c7-53b2-4a02-9119-ca75c0d423fd\") " Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.225028 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e329d7c7-53b2-4a02-9119-ca75c0d423fd-kube-api-access-n6b69" (OuterVolumeSpecName: "kube-api-access-n6b69") pod "e329d7c7-53b2-4a02-9119-ca75c0d423fd" (UID: "e329d7c7-53b2-4a02-9119-ca75c0d423fd"). InnerVolumeSpecName "kube-api-access-n6b69". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.319074 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6b69\" (UniqueName: \"kubernetes.io/projected/e329d7c7-53b2-4a02-9119-ca75c0d423fd-kube-api-access-n6b69\") on node \"crc\" DevicePath \"\"" Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.724428 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5f1f-account-create-fgnw4" event={"ID":"e329d7c7-53b2-4a02-9119-ca75c0d423fd","Type":"ContainerDied","Data":"7e8ff1bfc46c00e89f37a56fb07d099fb2e07e1b2655f508303e480395e208c3"} Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.724709 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e8ff1bfc46c00e89f37a56fb07d099fb2e07e1b2655f508303e480395e208c3" Nov 28 16:57:47 crc kubenswrapper[4884]: I1128 16:57:47.724495 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5f1f-account-create-fgnw4" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.235811 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-snckd"] Nov 28 16:57:49 crc kubenswrapper[4884]: E1128 16:57:49.236620 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e329d7c7-53b2-4a02-9119-ca75c0d423fd" containerName="mariadb-account-create" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.236636 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e329d7c7-53b2-4a02-9119-ca75c0d423fd" containerName="mariadb-account-create" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.236861 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e329d7c7-53b2-4a02-9119-ca75c0d423fd" containerName="mariadb-account-create" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.237744 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.239472 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.240791 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m46tl" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.244344 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-snckd"] Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.270139 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.351564 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-db-sync-config-data\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.351640 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-combined-ca-bundle\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.351661 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/91907454-6e9b-481a-a322-ac1cd7d79ecd-etc-machine-id\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.351700 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqgnf\" (UniqueName: \"kubernetes.io/projected/91907454-6e9b-481a-a322-ac1cd7d79ecd-kube-api-access-fqgnf\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.351724 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-scripts\") pod 
\"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.352276 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-config-data\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453589 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-combined-ca-bundle\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/91907454-6e9b-481a-a322-ac1cd7d79ecd-etc-machine-id\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqgnf\" (UniqueName: \"kubernetes.io/projected/91907454-6e9b-481a-a322-ac1cd7d79ecd-kube-api-access-fqgnf\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453704 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-scripts\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453746 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-config-data\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453796 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/91907454-6e9b-481a-a322-ac1cd7d79ecd-etc-machine-id\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.453812 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-db-sync-config-data\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.459679 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-combined-ca-bundle\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.459851 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-scripts\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.459981 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-config-data\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.460628 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-db-sync-config-data\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.471598 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqgnf\" (UniqueName: \"kubernetes.io/projected/91907454-6e9b-481a-a322-ac1cd7d79ecd-kube-api-access-fqgnf\") pod \"cinder-db-sync-snckd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") " pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.604464 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-snckd" Nov 28 16:57:49 crc kubenswrapper[4884]: I1128 16:57:49.688406 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:57:49 crc kubenswrapper[4884]: E1128 16:57:49.689029 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:57:50 crc kubenswrapper[4884]: I1128 16:57:50.073245 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-snckd"] Nov 28 16:57:50 crc kubenswrapper[4884]: I1128 16:57:50.760281 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-snckd" event={"ID":"91907454-6e9b-481a-a322-ac1cd7d79ecd","Type":"ContainerStarted","Data":"a2faa47b1e3aa901f2b2fe29590a0341f6369f79fd5dcb4b3af6858ff28311ec"} Nov 28 16:57:50 crc kubenswrapper[4884]: I1128 16:57:50.760671 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-snckd" event={"ID":"91907454-6e9b-481a-a322-ac1cd7d79ecd","Type":"ContainerStarted","Data":"b3b336df768e550a8eb30461ad71ac7097d64692cc1ccdf30e8a29bcc1ec8e00"} Nov 28 16:57:50 crc kubenswrapper[4884]: I1128 16:57:50.780659 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-snckd" podStartSLOduration=1.78063746 podStartE2EDuration="1.78063746s" podCreationTimestamp="2025-11-28 16:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:50.776571052 +0000 UTC m=+5910.339354853" watchObservedRunningTime="2025-11-28 16:57:50.78063746 +0000 UTC 
m=+5910.343421261"
Nov 28 16:57:53 crc kubenswrapper[4884]: I1128 16:57:53.796655 4884 generic.go:334] "Generic (PLEG): container finished" podID="91907454-6e9b-481a-a322-ac1cd7d79ecd" containerID="a2faa47b1e3aa901f2b2fe29590a0341f6369f79fd5dcb4b3af6858ff28311ec" exitCode=0
Nov 28 16:57:53 crc kubenswrapper[4884]: I1128 16:57:53.796771 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-snckd" event={"ID":"91907454-6e9b-481a-a322-ac1cd7d79ecd","Type":"ContainerDied","Data":"a2faa47b1e3aa901f2b2fe29590a0341f6369f79fd5dcb4b3af6858ff28311ec"}
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.126730 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-snckd"
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.163853 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-config-data\") pod \"91907454-6e9b-481a-a322-ac1cd7d79ecd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") "
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.163901 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-db-sync-config-data\") pod \"91907454-6e9b-481a-a322-ac1cd7d79ecd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") "
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.164030 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqgnf\" (UniqueName: \"kubernetes.io/projected/91907454-6e9b-481a-a322-ac1cd7d79ecd-kube-api-access-fqgnf\") pod \"91907454-6e9b-481a-a322-ac1cd7d79ecd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") "
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.164066 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-scripts\") pod \"91907454-6e9b-481a-a322-ac1cd7d79ecd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") "
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.164106 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-combined-ca-bundle\") pod \"91907454-6e9b-481a-a322-ac1cd7d79ecd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") "
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.164146 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/91907454-6e9b-481a-a322-ac1cd7d79ecd-etc-machine-id\") pod \"91907454-6e9b-481a-a322-ac1cd7d79ecd\" (UID: \"91907454-6e9b-481a-a322-ac1cd7d79ecd\") "
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.164429 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91907454-6e9b-481a-a322-ac1cd7d79ecd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "91907454-6e9b-481a-a322-ac1cd7d79ecd" (UID: "91907454-6e9b-481a-a322-ac1cd7d79ecd"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.169683 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-scripts" (OuterVolumeSpecName: "scripts") pod "91907454-6e9b-481a-a322-ac1cd7d79ecd" (UID: "91907454-6e9b-481a-a322-ac1cd7d79ecd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.169697 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91907454-6e9b-481a-a322-ac1cd7d79ecd-kube-api-access-fqgnf" (OuterVolumeSpecName: "kube-api-access-fqgnf") pod "91907454-6e9b-481a-a322-ac1cd7d79ecd" (UID: "91907454-6e9b-481a-a322-ac1cd7d79ecd"). InnerVolumeSpecName "kube-api-access-fqgnf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.170040 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "91907454-6e9b-481a-a322-ac1cd7d79ecd" (UID: "91907454-6e9b-481a-a322-ac1cd7d79ecd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.190403 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91907454-6e9b-481a-a322-ac1cd7d79ecd" (UID: "91907454-6e9b-481a-a322-ac1cd7d79ecd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.211466 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-config-data" (OuterVolumeSpecName: "config-data") pod "91907454-6e9b-481a-a322-ac1cd7d79ecd" (UID: "91907454-6e9b-481a-a322-ac1cd7d79ecd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.265548 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqgnf\" (UniqueName: \"kubernetes.io/projected/91907454-6e9b-481a-a322-ac1cd7d79ecd-kube-api-access-fqgnf\") on node \"crc\" DevicePath \"\""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.265599 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.265613 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.265625 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/91907454-6e9b-481a-a322-ac1cd7d79ecd-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.265637 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.265648 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/91907454-6e9b-481a-a322-ac1cd7d79ecd-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.816190 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-snckd" event={"ID":"91907454-6e9b-481a-a322-ac1cd7d79ecd","Type":"ContainerDied","Data":"b3b336df768e550a8eb30461ad71ac7097d64692cc1ccdf30e8a29bcc1ec8e00"}
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.816226 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3b336df768e550a8eb30461ad71ac7097d64692cc1ccdf30e8a29bcc1ec8e00"
Nov 28 16:57:55 crc kubenswrapper[4884]: I1128 16:57:55.816268 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-snckd"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.132160 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-554cf9d48c-cltfv"]
Nov 28 16:57:56 crc kubenswrapper[4884]: E1128 16:57:56.132632 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91907454-6e9b-481a-a322-ac1cd7d79ecd" containerName="cinder-db-sync"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.132648 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="91907454-6e9b-481a-a322-ac1cd7d79ecd" containerName="cinder-db-sync"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.132886 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="91907454-6e9b-481a-a322-ac1cd7d79ecd" containerName="cinder-db-sync"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.134137 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.152565 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-554cf9d48c-cltfv"]
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.281776 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjxgj\" (UniqueName: \"kubernetes.io/projected/c104c909-309e-4223-9d7e-4219963e167e-kube-api-access-xjxgj\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.282182 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-dns-svc\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.282231 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-sb\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.282322 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-config\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.282427 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-nb\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.384823 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-config\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.384908 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-nb\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.384945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjxgj\" (UniqueName: \"kubernetes.io/projected/c104c909-309e-4223-9d7e-4219963e167e-kube-api-access-xjxgj\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.384965 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-dns-svc\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.384993 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-sb\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.385978 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-sb\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.387076 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-dns-svc\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.387154 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-nb\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.387648 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-config\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.388219 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.389908 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.393102 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-m46tl"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.393237 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.393245 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.393495 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.412627 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.415699 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjxgj\" (UniqueName: \"kubernetes.io/projected/c104c909-309e-4223-9d7e-4219963e167e-kube-api-access-xjxgj\") pod \"dnsmasq-dns-554cf9d48c-cltfv\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.456799 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.588825 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6766345a-0cd7-424c-9924-93a60a312625-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.588881 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6766345a-0cd7-424c-9924-93a60a312625-logs\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.588918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data-custom\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.588947 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-scripts\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.589001 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.589071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.589660 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htmfn\" (UniqueName: \"kubernetes.io/projected/6766345a-0cd7-424c-9924-93a60a312625-kube-api-access-htmfn\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.690654 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.690918 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.690961 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htmfn\" (UniqueName: \"kubernetes.io/projected/6766345a-0cd7-424c-9924-93a60a312625-kube-api-access-htmfn\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.691005 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6766345a-0cd7-424c-9924-93a60a312625-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.691028 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6766345a-0cd7-424c-9924-93a60a312625-logs\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.691050 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data-custom\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.691067 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-scripts\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.700950 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6766345a-0cd7-424c-9924-93a60a312625-logs\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.701129 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6766345a-0cd7-424c-9924-93a60a312625-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.711456 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-scripts\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.722628 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.735068 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.735996 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data-custom\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:56 crc kubenswrapper[4884]: I1128 16:57:56.756904 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htmfn\" (UniqueName: \"kubernetes.io/projected/6766345a-0cd7-424c-9924-93a60a312625-kube-api-access-htmfn\") pod \"cinder-api-0\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") " pod="openstack/cinder-api-0"
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.007964 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.041041 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-554cf9d48c-cltfv"]
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.456558 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.834363 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6766345a-0cd7-424c-9924-93a60a312625","Type":"ContainerStarted","Data":"6b9585b000ce05da37a5a8c38359d55619ae12eea4dc576afd18843c787058d5"}
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.839419 4884 generic.go:334] "Generic (PLEG): container finished" podID="c104c909-309e-4223-9d7e-4219963e167e" containerID="4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0" exitCode=0
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.839453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" event={"ID":"c104c909-309e-4223-9d7e-4219963e167e","Type":"ContainerDied","Data":"4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0"}
Nov 28 16:57:57 crc kubenswrapper[4884]: I1128 16:57:57.839473 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" event={"ID":"c104c909-309e-4223-9d7e-4219963e167e","Type":"ContainerStarted","Data":"a0aaa50c58a8c6a34158e30087dd913427fffcdac1913e60a7dfbc608c7a56cc"}
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.853191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" event={"ID":"c104c909-309e-4223-9d7e-4219963e167e","Type":"ContainerStarted","Data":"7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c"}
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.853558 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.855370 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6766345a-0cd7-424c-9924-93a60a312625","Type":"ContainerStarted","Data":"e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857"}
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.855428 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6766345a-0cd7-424c-9924-93a60a312625","Type":"ContainerStarted","Data":"eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b"}
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.857536 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.875955 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" podStartSLOduration=2.875935622 podStartE2EDuration="2.875935622s" podCreationTimestamp="2025-11-28 16:57:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:58.873516413 +0000 UTC m=+5918.436300224" watchObservedRunningTime="2025-11-28 16:57:58.875935622 +0000 UTC m=+5918.438719423"
Nov 28 16:57:58 crc kubenswrapper[4884]: I1128 16:57:58.902986 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.902963664 podStartE2EDuration="2.902963664s" podCreationTimestamp="2025-11-28 16:57:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:57:58.893047801 +0000 UTC m=+5918.455831602" watchObservedRunningTime="2025-11-28 16:57:58.902963664 +0000 UTC m=+5918.465747475"
Nov 28 16:58:01 crc kubenswrapper[4884]: I1128 16:58:01.689407 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:58:01 crc kubenswrapper[4884]: E1128 16:58:01.690453 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:58:06 crc kubenswrapper[4884]: I1128 16:58:06.459370 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv"
Nov 28 16:58:06 crc kubenswrapper[4884]: I1128 16:58:06.539041 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-794688c677-msx6x"]
Nov 28 16:58:06 crc kubenswrapper[4884]: I1128 16:58:06.539355 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-794688c677-msx6x" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerName="dnsmasq-dns" containerID="cri-o://906d5fbe19f4797529b32fffa4c0d001766cb787ae43969e3b2ec74d3c2c0392" gracePeriod=10
Nov 28 16:58:06 crc kubenswrapper[4884]: I1128 16:58:06.935822 4884 generic.go:334] "Generic (PLEG): container finished" podID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerID="906d5fbe19f4797529b32fffa4c0d001766cb787ae43969e3b2ec74d3c2c0392" exitCode=0
Nov 28 16:58:06 crc kubenswrapper[4884]: I1128 16:58:06.936031 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794688c677-msx6x" event={"ID":"177f87bf-3e45-4455-947e-e40d0ec1b251","Type":"ContainerDied","Data":"906d5fbe19f4797529b32fffa4c0d001766cb787ae43969e3b2ec74d3c2c0392"}
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.057834 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-794688c677-msx6x"
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.193543 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-config\") pod \"177f87bf-3e45-4455-947e-e40d0ec1b251\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") "
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.193586 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-dns-svc\") pod \"177f87bf-3e45-4455-947e-e40d0ec1b251\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") "
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.193637 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-sb\") pod \"177f87bf-3e45-4455-947e-e40d0ec1b251\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") "
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.193685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-nb\") pod \"177f87bf-3e45-4455-947e-e40d0ec1b251\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") "
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.193747 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bp9jv\" (UniqueName: \"kubernetes.io/projected/177f87bf-3e45-4455-947e-e40d0ec1b251-kube-api-access-bp9jv\") pod \"177f87bf-3e45-4455-947e-e40d0ec1b251\" (UID: \"177f87bf-3e45-4455-947e-e40d0ec1b251\") "
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.219905 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/177f87bf-3e45-4455-947e-e40d0ec1b251-kube-api-access-bp9jv" (OuterVolumeSpecName: "kube-api-access-bp9jv") pod "177f87bf-3e45-4455-947e-e40d0ec1b251" (UID: "177f87bf-3e45-4455-947e-e40d0ec1b251"). InnerVolumeSpecName "kube-api-access-bp9jv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.247582 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "177f87bf-3e45-4455-947e-e40d0ec1b251" (UID: "177f87bf-3e45-4455-947e-e40d0ec1b251"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.250499 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "177f87bf-3e45-4455-947e-e40d0ec1b251" (UID: "177f87bf-3e45-4455-947e-e40d0ec1b251"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.256585 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "177f87bf-3e45-4455-947e-e40d0ec1b251" (UID: "177f87bf-3e45-4455-947e-e40d0ec1b251"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.259654 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-config" (OuterVolumeSpecName: "config") pod "177f87bf-3e45-4455-947e-e40d0ec1b251" (UID: "177f87bf-3e45-4455-947e-e40d0ec1b251"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.295693 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.295732 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.295743 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bp9jv\" (UniqueName: \"kubernetes.io/projected/177f87bf-3e45-4455-947e-e40d0ec1b251-kube-api-access-bp9jv\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.295757 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-config\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.295767 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/177f87bf-3e45-4455-947e-e40d0ec1b251-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.948338 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-794688c677-msx6x" event={"ID":"177f87bf-3e45-4455-947e-e40d0ec1b251","Type":"ContainerDied","Data":"b5cb52f00deb26cac2e5041f55f8cfda2d3789536625d5990229bf0d42b2839b"}
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.948395 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-794688c677-msx6x"
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.948672 4884 scope.go:117] "RemoveContainer" containerID="906d5fbe19f4797529b32fffa4c0d001766cb787ae43969e3b2ec74d3c2c0392"
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.973909 4884 scope.go:117] "RemoveContainer" containerID="d5e40fc8a2307c6b7a94e169c1da6bdd582baaf7b95086a889ceab451c89faf2"
Nov 28 16:58:07 crc kubenswrapper[4884]: I1128 16:58:07.990369 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-794688c677-msx6x"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.004412 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-794688c677-msx6x"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.021618 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.021851 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="9ca70283-cb6d-47ca-8039-a2ec6c4af560" containerName="nova-scheduler-scheduler" containerID="cri-o://8da3996f6b4c381e20c1561577bec6d68dd54693d7e39365e7ae84bb77814f01" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.037905 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.038169 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" containerName="nova-cell0-conductor-conductor" containerID="cri-o://5a9d45c2f5bf198e2fd564802cd4d60507f8ab22f928d7f0d518b7902144efc4" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.061646 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.061947 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-log" containerID="cri-o://ec0dea92d0e99dc3cd9129a6395fb3b20e5f1893b20089b1a6960f34201514f7" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.062488 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-api" containerID="cri-o://23fa4b8bcccab825e1fdc70642491bed705058d294a71a1edcfa31ff79db1320" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.085578 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.085822 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.102965 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.103263 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-log" containerID="cri-o://e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.103299 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-metadata" containerID="cri-o://5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb" gracePeriod=30
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.729259 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" path="/var/lib/kubelet/pods/177f87bf-3e45-4455-947e-e40d0ec1b251/volumes"
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.916566 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.967480 4884 generic.go:334] "Generic (PLEG): container finished" podID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerID="e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4" exitCode=143
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.967550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"356a312a-4db2-4488-b4e4-6dffdc6640ac","Type":"ContainerDied","Data":"e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4"}
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.969698 4884 generic.go:334] "Generic (PLEG): container finished" podID="2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" containerID="129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961" exitCode=0
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.969855 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.970397 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0","Type":"ContainerDied","Data":"129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961"}
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.970467 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0","Type":"ContainerDied","Data":"5dc2928ec3ad88a1bf9392f55d6f993239a1cfe8d944997781f6e5fc48e1fb60"}
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.970493 4884 scope.go:117] "RemoveContainer" containerID="129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961"
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.981668 4884 generic.go:334] "Generic (PLEG): container finished" podID="8e374b20-e3f7-46ea-8489-3874c4778557" containerID="ec0dea92d0e99dc3cd9129a6395fb3b20e5f1893b20089b1a6960f34201514f7" exitCode=143
Nov 28 16:58:08 crc kubenswrapper[4884]: I1128 16:58:08.981722 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8e374b20-e3f7-46ea-8489-3874c4778557","Type":"ContainerDied","Data":"ec0dea92d0e99dc3cd9129a6395fb3b20e5f1893b20089b1a6960f34201514f7"}
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.014345 4884 scope.go:117] "RemoveContainer" containerID="129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961"
Nov 28 16:58:09 crc kubenswrapper[4884]: E1128 16:58:09.014914 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961\": container with ID starting with 129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961 not found: ID does not exist" containerID="129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.014958 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961"} err="failed to get container status \"129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961\": rpc error: code = NotFound desc = could not find container \"129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961\": container with ID starting with 129b112f32099606c5f1ad2c6c7797e990872f5d08c910c34d3c01d92a58f961 not found: ID does not exist"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.024791 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-combined-ca-bundle\") pod \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") "
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.025406 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-config-data\") pod \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") "
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.025541 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86rdz\" (UniqueName: \"kubernetes.io/projected/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-kube-api-access-86rdz\") pod \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\" (UID: \"2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0\") "
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.046431 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-kube-api-access-86rdz" (OuterVolumeSpecName: "kube-api-access-86rdz") pod "2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" (UID: "2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0"). InnerVolumeSpecName "kube-api-access-86rdz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.059648 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" (UID: "2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.059702 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-config-data" (OuterVolumeSpecName: "config-data") pod "2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" (UID: "2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.068859 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.127758 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.127969 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.128028 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86rdz\" (UniqueName: \"kubernetes.io/projected/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0-kube-api-access-86rdz\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.315375 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.324830 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.342246 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 16:58:09 crc kubenswrapper[4884]: E1128 16:58:09.342665 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerName="init"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.342682 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerName="init"
Nov 28 16:58:09 crc kubenswrapper[4884]: E1128 16:58:09.342706 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.342714 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 16:58:09 crc kubenswrapper[4884]: E1128 16:58:09.342728 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerName="dnsmasq-dns"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.342733 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerName="dnsmasq-dns"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.342892 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="177f87bf-3e45-4455-947e-e40d0ec1b251" containerName="dnsmasq-dns"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.342918 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.343654 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.346185 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.357321 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.435726 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3fabc95-602e-4977-b1e7-2a7eebb084c9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.436066 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fww8t\" (UniqueName: \"kubernetes.io/projected/f3fabc95-602e-4977-b1e7-2a7eebb084c9-kube-api-access-fww8t\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.436350 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3fabc95-602e-4977-b1e7-2a7eebb084c9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.538669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3fabc95-602e-4977-b1e7-2a7eebb084c9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.539232 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3fabc95-602e-4977-b1e7-2a7eebb084c9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.539434 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fww8t\" (UniqueName: \"kubernetes.io/projected/f3fabc95-602e-4977-b1e7-2a7eebb084c9-kube-api-access-fww8t\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.544866 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3fabc95-602e-4977-b1e7-2a7eebb084c9-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.545975 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3fabc95-602e-4977-b1e7-2a7eebb084c9-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.562245 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fww8t\" (UniqueName: \"kubernetes.io/projected/f3fabc95-602e-4977-b1e7-2a7eebb084c9-kube-api-access-fww8t\") pod \"nova-cell1-novncproxy-0\" (UID: \"f3fabc95-602e-4977-b1e7-2a7eebb084c9\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:09 crc kubenswrapper[4884]: I1128 16:58:09.661795 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.014140 4884 generic.go:334] "Generic (PLEG): container finished" podID="9ca70283-cb6d-47ca-8039-a2ec6c4af560" containerID="8da3996f6b4c381e20c1561577bec6d68dd54693d7e39365e7ae84bb77814f01" exitCode=0
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.014214 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9ca70283-cb6d-47ca-8039-a2ec6c4af560","Type":"ContainerDied","Data":"8da3996f6b4c381e20c1561577bec6d68dd54693d7e39365e7ae84bb77814f01"}
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.076134 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.193969 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 16:58:10 crc kubenswrapper[4884]: W1128 16:58:10.196218 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3fabc95_602e_4977_b1e7_2a7eebb084c9.slice/crio-27ef0fe037d3951a1961951f8b6d027eaf93cbad2d0d9dd98f26daea9d4684b3 WatchSource:0}: Error finding container 27ef0fe037d3951a1961951f8b6d027eaf93cbad2d0d9dd98f26daea9d4684b3: Status 404 returned error can't find the container with id 27ef0fe037d3951a1961951f8b6d027eaf93cbad2d0d9dd98f26daea9d4684b3
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.263950 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-config-data\") pod \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") "
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.264320 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw7g5\" (UniqueName: \"kubernetes.io/projected/9ca70283-cb6d-47ca-8039-a2ec6c4af560-kube-api-access-tw7g5\") pod \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") "
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.264485 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-combined-ca-bundle\") pod \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\" (UID: \"9ca70283-cb6d-47ca-8039-a2ec6c4af560\") "
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.269589 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ca70283-cb6d-47ca-8039-a2ec6c4af560-kube-api-access-tw7g5" (OuterVolumeSpecName: "kube-api-access-tw7g5") pod "9ca70283-cb6d-47ca-8039-a2ec6c4af560" (UID: "9ca70283-cb6d-47ca-8039-a2ec6c4af560"). InnerVolumeSpecName "kube-api-access-tw7g5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.292932 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-config-data" (OuterVolumeSpecName: "config-data") pod "9ca70283-cb6d-47ca-8039-a2ec6c4af560" (UID: "9ca70283-cb6d-47ca-8039-a2ec6c4af560"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.295393 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ca70283-cb6d-47ca-8039-a2ec6c4af560" (UID: "9ca70283-cb6d-47ca-8039-a2ec6c4af560"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.366770 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.366798 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw7g5\" (UniqueName: \"kubernetes.io/projected/9ca70283-cb6d-47ca-8039-a2ec6c4af560-kube-api-access-tw7g5\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.366807 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ca70283-cb6d-47ca-8039-a2ec6c4af560-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:10 crc kubenswrapper[4884]: I1128 16:58:10.705054 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0" path="/var/lib/kubelet/pods/2ad0cb90-122b-49a9-a4bb-7fc3488ad6e0/volumes"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.025651 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.026632 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"9ca70283-cb6d-47ca-8039-a2ec6c4af560","Type":"ContainerDied","Data":"60cf18df933e4259bc0c429946e55b5334d5afebe385708294bc6be4757bcd6b"}
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.026692 4884 scope.go:117] "RemoveContainer" containerID="8da3996f6b4c381e20c1561577bec6d68dd54693d7e39365e7ae84bb77814f01"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.029514 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f3fabc95-602e-4977-b1e7-2a7eebb084c9","Type":"ContainerStarted","Data":"bbd36c000e2bb2e816fa6ae3001f726df327206c3fb80f7aafb4b3eaf218d1cf"}
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.029635 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f3fabc95-602e-4977-b1e7-2a7eebb084c9","Type":"ContainerStarted","Data":"27ef0fe037d3951a1961951f8b6d027eaf93cbad2d0d9dd98f26daea9d4684b3"}
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.072311 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.072291501 podStartE2EDuration="2.072291501s" podCreationTimestamp="2025-11-28 16:58:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:11.046177202 +0000 UTC m=+5930.608961003" watchObservedRunningTime="2025-11-28 16:58:11.072291501 +0000 UTC m=+5930.635075292"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.083942 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.097224 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.106040 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:58:11 crc kubenswrapper[4884]: E1128 16:58:11.106587 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ca70283-cb6d-47ca-8039-a2ec6c4af560" containerName="nova-scheduler-scheduler"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.106610 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ca70283-cb6d-47ca-8039-a2ec6c4af560" containerName="nova-scheduler-scheduler"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.106831 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ca70283-cb6d-47ca-8039-a2ec6c4af560" containerName="nova-scheduler-scheduler"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.107611 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.118109 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.122776 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.248884 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": read tcp 10.217.0.2:37510->10.217.1.70:8775: read: connection reset by peer"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.249302 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.70:8775/\": read tcp 10.217.0.2:37526->10.217.1.70:8775: read: connection reset by peer"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.285424 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jchcp\" (UniqueName: \"kubernetes.io/projected/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-kube-api-access-jchcp\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.285528 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.285597 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-config-data\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.387957 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.388034 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-config-data\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.388248 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jchcp\" (UniqueName: \"kubernetes.io/projected/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-kube-api-access-jchcp\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.393733 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-config-data\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.407802 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.409399 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jchcp\" (UniqueName: \"kubernetes.io/projected/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-kube-api-access-jchcp\") pod \"nova-scheduler-0\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.429919 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.616973 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.617485 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="3946aff4-5d46-4047-b17f-b92772d9477d" containerName="nova-cell1-conductor-conductor" containerID="cri-o://ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" gracePeriod=30
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.730745 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.903054 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/356a312a-4db2-4488-b4e4-6dffdc6640ac-logs\") pod \"356a312a-4db2-4488-b4e4-6dffdc6640ac\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") "
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.903128 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9j4x\" (UniqueName: \"kubernetes.io/projected/356a312a-4db2-4488-b4e4-6dffdc6640ac-kube-api-access-p9j4x\") pod \"356a312a-4db2-4488-b4e4-6dffdc6640ac\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") "
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.903237 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-config-data\") pod \"356a312a-4db2-4488-b4e4-6dffdc6640ac\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") "
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.903292 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-combined-ca-bundle\") pod \"356a312a-4db2-4488-b4e4-6dffdc6640ac\" (UID: \"356a312a-4db2-4488-b4e4-6dffdc6640ac\") "
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.903866 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356a312a-4db2-4488-b4e4-6dffdc6640ac-logs" (OuterVolumeSpecName: "logs") pod "356a312a-4db2-4488-b4e4-6dffdc6640ac" (UID: "356a312a-4db2-4488-b4e4-6dffdc6640ac"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.934184 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/356a312a-4db2-4488-b4e4-6dffdc6640ac-kube-api-access-p9j4x" (OuterVolumeSpecName: "kube-api-access-p9j4x") pod "356a312a-4db2-4488-b4e4-6dffdc6640ac" (UID: "356a312a-4db2-4488-b4e4-6dffdc6640ac"). InnerVolumeSpecName "kube-api-access-p9j4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.971264 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-config-data" (OuterVolumeSpecName: "config-data") pod "356a312a-4db2-4488-b4e4-6dffdc6640ac" (UID: "356a312a-4db2-4488-b4e4-6dffdc6640ac"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:11 crc kubenswrapper[4884]: I1128 16:58:11.992294 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.006791 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/356a312a-4db2-4488-b4e4-6dffdc6640ac-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.006816 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9j4x\" (UniqueName: \"kubernetes.io/projected/356a312a-4db2-4488-b4e4-6dffdc6640ac-kube-api-access-p9j4x\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.006824 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.067299 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "356a312a-4db2-4488-b4e4-6dffdc6640ac" (UID: "356a312a-4db2-4488-b4e4-6dffdc6640ac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.089270 4884 generic.go:334] "Generic (PLEG): container finished" podID="5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" containerID="5a9d45c2f5bf198e2fd564802cd4d60507f8ab22f928d7f0d518b7902144efc4" exitCode=0 Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.089342 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a","Type":"ContainerDied","Data":"5a9d45c2f5bf198e2fd564802cd4d60507f8ab22f928d7f0d518b7902144efc4"} Nov 28 16:58:12 crc kubenswrapper[4884]: W1128 16:58:12.090397 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c9041b2_31ca_4c8f_a9c6_f39fe85f1860.slice/crio-acfc28e925a1ea951b9a4a96f8c0dcfc77d21f106d9b720d49a1231b96a418eb WatchSource:0}: Error finding container acfc28e925a1ea951b9a4a96f8c0dcfc77d21f106d9b720d49a1231b96a418eb: Status 404 returned error can't find the container with id acfc28e925a1ea951b9a4a96f8c0dcfc77d21f106d9b720d49a1231b96a418eb Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.096185 4884 generic.go:334] "Generic (PLEG): container finished" podID="8e374b20-e3f7-46ea-8489-3874c4778557" containerID="23fa4b8bcccab825e1fdc70642491bed705058d294a71a1edcfa31ff79db1320" exitCode=0 Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.096247 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8e374b20-e3f7-46ea-8489-3874c4778557","Type":"ContainerDied","Data":"23fa4b8bcccab825e1fdc70642491bed705058d294a71a1edcfa31ff79db1320"} Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.101742 4884 generic.go:334] "Generic (PLEG): container finished" podID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerID="5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb" exitCode=0 Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.102178 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"356a312a-4db2-4488-b4e4-6dffdc6640ac","Type":"ContainerDied","Data":"5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb"} Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.102210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"356a312a-4db2-4488-b4e4-6dffdc6640ac","Type":"ContainerDied","Data":"3835a9ae6208f37052ea8d9c54c942d9f7b41589215e8e2842b94b77a0cb63c2"} Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.102226 4884 scope.go:117] "RemoveContainer" containerID="5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.103296 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.130781 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/356a312a-4db2-4488-b4e4-6dffdc6640ac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.184752 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.202901 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.204595 4884 scope.go:117] "RemoveContainer" containerID="e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.211422 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.213106 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.213637 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-metadata" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.213664 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-metadata" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.213688 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-log" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.213699 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-log" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.213713 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-api" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.213720 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-api" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.213745 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-log" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.213753 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-log" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.214018 4884 
memory_manager.go:354] "RemoveStaleState removing state" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-api" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.214044 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-log" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.214068 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" containerName="nova-api-log" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.214101 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" containerName="nova-metadata-metadata" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.215737 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.217390 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.222052 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233377 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-combined-ca-bundle\") pod \"8e374b20-e3f7-46ea-8489-3874c4778557\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233422 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e374b20-e3f7-46ea-8489-3874c4778557-logs\") pod \"8e374b20-e3f7-46ea-8489-3874c4778557\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233441 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-config-data\") pod \"8e374b20-e3f7-46ea-8489-3874c4778557\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233526 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lghjx\" (UniqueName: \"kubernetes.io/projected/8e374b20-e3f7-46ea-8489-3874c4778557-kube-api-access-lghjx\") pod \"8e374b20-e3f7-46ea-8489-3874c4778557\" (UID: \"8e374b20-e3f7-46ea-8489-3874c4778557\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233653 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-config-data\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233763 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttrzj\" (UniqueName: \"kubernetes.io/projected/3af77193-c23c-451b-84b1-342985e2fe7e-kube-api-access-ttrzj\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233795 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3af77193-c23c-451b-84b1-342985e2fe7e-logs\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.233864 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.240280 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e374b20-e3f7-46ea-8489-3874c4778557-logs" (OuterVolumeSpecName: "logs") pod "8e374b20-e3f7-46ea-8489-3874c4778557" (UID: "8e374b20-e3f7-46ea-8489-3874c4778557"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.269438 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e374b20-e3f7-46ea-8489-3874c4778557-kube-api-access-lghjx" (OuterVolumeSpecName: "kube-api-access-lghjx") pod "8e374b20-e3f7-46ea-8489-3874c4778557" (UID: "8e374b20-e3f7-46ea-8489-3874c4778557"). InnerVolumeSpecName "kube-api-access-lghjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.277780 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e374b20-e3f7-46ea-8489-3874c4778557" (UID: "8e374b20-e3f7-46ea-8489-3874c4778557"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.289666 4884 scope.go:117] "RemoveContainer" containerID="5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.294314 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb\": container with ID starting with 5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb not found: ID does not exist" containerID="5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.294357 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb"} err="failed to get container status \"5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb\": rpc error: code = NotFound desc = could not find container \"5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb\": container with ID starting with 5f948890e050a26ac68f3286632d7c6d235424fdbe3e25d04c18a8f2e9394ceb not found: ID does not exist" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.294382 4884 scope.go:117] "RemoveContainer" containerID="e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.295148 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4\": container with ID starting with e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4 not found: ID does not exist" containerID="e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.295199 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4"} err="failed to get container status \"e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4\": rpc error: code = NotFound desc = could not find container \"e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4\": container with ID starting with e38b75306191d87724221a03275af434cdd938ade0783ef4063d60edaa4841f4 not found: ID does not exist" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.296772 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-config-data" (OuterVolumeSpecName: "config-data") pod "8e374b20-e3f7-46ea-8489-3874c4778557" (UID: "8e374b20-e3f7-46ea-8489-3874c4778557"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.339075 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttrzj\" (UniqueName: \"kubernetes.io/projected/3af77193-c23c-451b-84b1-342985e2fe7e-kube-api-access-ttrzj\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.339462 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3af77193-c23c-451b-84b1-342985e2fe7e-logs\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.340353 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3af77193-c23c-451b-84b1-342985e2fe7e-logs\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.339534 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.340459 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-config-data\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.340648 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lghjx\" (UniqueName: \"kubernetes.io/projected/8e374b20-e3f7-46ea-8489-3874c4778557-kube-api-access-lghjx\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.340669 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.340681 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e374b20-e3f7-46ea-8489-3874c4778557-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.340692 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e374b20-e3f7-46ea-8489-3874c4778557-logs\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.346638 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.347032 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-config-data\") pod \"nova-metadata-0\" (UID: 
\"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.347916 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.356643 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttrzj\" (UniqueName: \"kubernetes.io/projected/3af77193-c23c-451b-84b1-342985e2fe7e-kube-api-access-ttrzj\") pod \"nova-metadata-0\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.360150 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.361613 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.361656 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="3946aff4-5d46-4047-b17f-b92772d9477d" containerName="nova-cell1-conductor-conductor" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.450765 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.542638 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-combined-ca-bundle\") pod \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.542723 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n86k7\" (UniqueName: \"kubernetes.io/projected/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-kube-api-access-n86k7\") pod \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.542829 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data\") pod \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.546819 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-kube-api-access-n86k7" (OuterVolumeSpecName: "kube-api-access-n86k7") pod "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" (UID: "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a"). InnerVolumeSpecName "kube-api-access-n86k7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: E1128 16:58:12.572260 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data podName:5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a nodeName:}" failed. No retries permitted until 2025-11-28 16:58:13.072228006 +0000 UTC m=+5932.635011827 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data") pod "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" (UID: "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a") : error deleting /var/lib/kubelet/pods/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a/volume-subpaths: remove /var/lib/kubelet/pods/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a/volume-subpaths: no such file or directory Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.574799 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" (UID: "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.602562 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.645215 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.645260 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n86k7\" (UniqueName: \"kubernetes.io/projected/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-kube-api-access-n86k7\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.725846 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="356a312a-4db2-4488-b4e4-6dffdc6640ac" path="/var/lib/kubelet/pods/356a312a-4db2-4488-b4e4-6dffdc6640ac/volumes" Nov 28 16:58:12 crc kubenswrapper[4884]: I1128 16:58:12.727219 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ca70283-cb6d-47ca-8039-a2ec6c4af560" path="/var/lib/kubelet/pods/9ca70283-cb6d-47ca-8039-a2ec6c4af560/volumes" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.050863 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: W1128 16:58:13.062881 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3af77193_c23c_451b_84b1_342985e2fe7e.slice/crio-26fcec31ae02c03c343f534a7935a900909168052febce1926bf78bfdc0b1cf4 WatchSource:0}: Error finding container 26fcec31ae02c03c343f534a7935a900909168052febce1926bf78bfdc0b1cf4: Status 404 returned error can't find the container with id 26fcec31ae02c03c343f534a7935a900909168052febce1926bf78bfdc0b1cf4 Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.112232 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a","Type":"ContainerDied","Data":"947a1c2d3e795cc7f048ce530666517e7df7faf7b61acbe38416992695b89239"} Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.112284 4884 scope.go:117] "RemoveContainer" containerID="5a9d45c2f5bf198e2fd564802cd4d60507f8ab22f928d7f0d518b7902144efc4" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.112286 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.113797 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860","Type":"ContainerStarted","Data":"4b0dfc644389b7e67d38b0db9eca1ace7fc7b6bb8212cb092d00f1b221f25d62"} Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.113827 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860","Type":"ContainerStarted","Data":"acfc28e925a1ea951b9a4a96f8c0dcfc77d21f106d9b720d49a1231b96a418eb"} Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.117420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8e374b20-e3f7-46ea-8489-3874c4778557","Type":"ContainerDied","Data":"2bc9db0723a2b5540e862749e776a34060bb778ce3491187769c646533dac39c"} Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.117520 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.121394 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3af77193-c23c-451b-84b1-342985e2fe7e","Type":"ContainerStarted","Data":"26fcec31ae02c03c343f534a7935a900909168052febce1926bf78bfdc0b1cf4"} Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.134912 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.134891005 podStartE2EDuration="2.134891005s" podCreationTimestamp="2025-11-28 16:58:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:13.129465652 +0000 UTC m=+5932.692249453" watchObservedRunningTime="2025-11-28 16:58:13.134891005 +0000 UTC m=+5932.697674806" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.157155 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data\") pod \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\" (UID: \"5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a\") " Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.163617 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data" (OuterVolumeSpecName: "config-data") pod "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" (UID: "5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.167359 4884 scope.go:117] "RemoveContainer" containerID="23fa4b8bcccab825e1fdc70642491bed705058d294a71a1edcfa31ff79db1320" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.229368 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.233182 4884 scope.go:117] "RemoveContainer" containerID="ec0dea92d0e99dc3cd9129a6395fb3b20e5f1893b20089b1a6960f34201514f7" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.243055 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.259508 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.262477 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: E1128 16:58:13.262828 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" containerName="nova-cell0-conductor-conductor" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.262844 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" containerName="nova-cell0-conductor-conductor" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.263043 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" containerName="nova-cell0-conductor-conductor" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.263972 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.266018 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.278034 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.361244 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.361371 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/557350c4-25cf-4c90-aa9e-14f7e6623ebb-logs\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.361411 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-config-data\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.361474 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr2xz\" (UniqueName: \"kubernetes.io/projected/557350c4-25cf-4c90-aa9e-14f7e6623ebb-kube-api-access-wr2xz\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.453922 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.462606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr2xz\" (UniqueName: \"kubernetes.io/projected/557350c4-25cf-4c90-aa9e-14f7e6623ebb-kube-api-access-wr2xz\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.462664 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.462749 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/557350c4-25cf-4c90-aa9e-14f7e6623ebb-logs\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.462778 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-config-data\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.466607 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-config-data\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.466859 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/557350c4-25cf-4c90-aa9e-14f7e6623ebb-logs\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.467595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.470829 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.483371 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.484968 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.486886 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.493898 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr2xz\" (UniqueName: \"kubernetes.io/projected/557350c4-25cf-4c90-aa9e-14f7e6623ebb-kube-api-access-wr2xz\") pod \"nova-api-0\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.496475 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.563551 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsms8\" (UniqueName: \"kubernetes.io/projected/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-kube-api-access-wsms8\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.563619 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.563899 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.607519 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.665438 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.665563 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsms8\" (UniqueName: \"kubernetes.io/projected/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-kube-api-access-wsms8\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.665596 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.672750 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.672928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.688180 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:58:13 crc kubenswrapper[4884]: E1128 16:58:13.688582 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.688887 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsms8\" (UniqueName: \"kubernetes.io/projected/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-kube-api-access-wsms8\") pod \"nova-cell0-conductor-0\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:13 crc kubenswrapper[4884]: I1128 16:58:13.814616 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.134752 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.149920 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3af77193-c23c-451b-84b1-342985e2fe7e","Type":"ContainerStarted","Data":"589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb"} Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.149962 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3af77193-c23c-451b-84b1-342985e2fe7e","Type":"ContainerStarted","Data":"526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c"} Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.156254 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"557350c4-25cf-4c90-aa9e-14f7e6623ebb","Type":"ContainerStarted","Data":"3993e347c1704a2bd15a1b21acaa3ae5c2f6566f44aebee39c7fca62ef2d3a8c"} Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.172401 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.172374873 podStartE2EDuration="2.172374873s" podCreationTimestamp="2025-11-28 16:58:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:14.169113934 +0000 UTC m=+5933.731897735" watchObservedRunningTime="2025-11-28 16:58:14.172374873 +0000 UTC m=+5933.735158674" Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.326305 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.661890 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.714967 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a" path="/var/lib/kubelet/pods/5a85ed4d-ec4b-4ac1-b5b6-0abc95f97d7a/volumes" Nov 28 16:58:14 crc kubenswrapper[4884]: I1128 16:58:14.715923 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e374b20-e3f7-46ea-8489-3874c4778557" path="/var/lib/kubelet/pods/8e374b20-e3f7-46ea-8489-3874c4778557/volumes" Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.172312 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"557350c4-25cf-4c90-aa9e-14f7e6623ebb","Type":"ContainerStarted","Data":"5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332"} Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.172378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"557350c4-25cf-4c90-aa9e-14f7e6623ebb","Type":"ContainerStarted","Data":"6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b"} Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.175398 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93bec16-fe33-4ed0-b34d-0ba87456cb5f","Type":"ContainerStarted","Data":"f972c40085455d8433c3a7023300422f477d84bb2982dade567f0aec3613c9fa"} Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.175481 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.175502 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93bec16-fe33-4ed0-b34d-0ba87456cb5f","Type":"ContainerStarted","Data":"6b0d788b95fec155be60a647bb55e14f11f36233e077fe524318b5725944146d"} Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.198200 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.198180426 podStartE2EDuration="2.198180426s" podCreationTimestamp="2025-11-28 16:58:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:15.193269456 +0000 UTC m=+5934.756053307" watchObservedRunningTime="2025-11-28 16:58:15.198180426 +0000 UTC m=+5934.760964217" Nov 28 16:58:15 crc kubenswrapper[4884]: I1128 16:58:15.229185 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.229166955 podStartE2EDuration="2.229166955s" podCreationTimestamp="2025-11-28 16:58:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:15.22039892 +0000 UTC m=+5934.783182781" watchObservedRunningTime="2025-11-28 16:58:15.229166955 +0000 UTC m=+5934.791950756" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.430787 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.618749 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.734374 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4m5l\" (UniqueName: \"kubernetes.io/projected/3946aff4-5d46-4047-b17f-b92772d9477d-kube-api-access-d4m5l\") pod \"3946aff4-5d46-4047-b17f-b92772d9477d\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.734695 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-combined-ca-bundle\") pod \"3946aff4-5d46-4047-b17f-b92772d9477d\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.734738 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-config-data\") pod \"3946aff4-5d46-4047-b17f-b92772d9477d\" (UID: \"3946aff4-5d46-4047-b17f-b92772d9477d\") " Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.739523 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3946aff4-5d46-4047-b17f-b92772d9477d-kube-api-access-d4m5l" (OuterVolumeSpecName: "kube-api-access-d4m5l") pod "3946aff4-5d46-4047-b17f-b92772d9477d" (UID: "3946aff4-5d46-4047-b17f-b92772d9477d"). InnerVolumeSpecName "kube-api-access-d4m5l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.762293 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-config-data" (OuterVolumeSpecName: "config-data") pod "3946aff4-5d46-4047-b17f-b92772d9477d" (UID: "3946aff4-5d46-4047-b17f-b92772d9477d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.766651 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3946aff4-5d46-4047-b17f-b92772d9477d" (UID: "3946aff4-5d46-4047-b17f-b92772d9477d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.836972 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.837001 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3946aff4-5d46-4047-b17f-b92772d9477d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:16 crc kubenswrapper[4884]: I1128 16:58:16.837011 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4m5l\" (UniqueName: \"kubernetes.io/projected/3946aff4-5d46-4047-b17f-b92772d9477d-kube-api-access-d4m5l\") on node \"crc\" DevicePath \"\"" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.196042 4884 generic.go:334] "Generic (PLEG): container finished" podID="3946aff4-5d46-4047-b17f-b92772d9477d" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" exitCode=0 Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.196082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3946aff4-5d46-4047-b17f-b92772d9477d","Type":"ContainerDied","Data":"ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e"} Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.196193 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.196212 4884 scope.go:117] "RemoveContainer" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.196198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3946aff4-5d46-4047-b17f-b92772d9477d","Type":"ContainerDied","Data":"67cb62dab563fed866b4e8226685aee5df76b152a66046b3077bd0cdb61aceaf"} Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.213415 4884 scope.go:117] "RemoveContainer" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" Nov 28 16:58:17 crc kubenswrapper[4884]: E1128 16:58:17.213804 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e\": container with ID starting with ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e not found: ID does not exist" containerID="ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.213833 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e"} err="failed to get container status \"ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e\": rpc error: code = NotFound desc = could not find container \"ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e\": container with ID starting with ca914755fa121872d69ba83f74e7520f48ad76355b340187a390ea8c9568d03e not found: ID does not exist" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.241874 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.256626 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.267824 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:58:17 crc kubenswrapper[4884]: E1128 16:58:17.268328 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3946aff4-5d46-4047-b17f-b92772d9477d" containerName="nova-cell1-conductor-conductor" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.268351 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3946aff4-5d46-4047-b17f-b92772d9477d" containerName="nova-cell1-conductor-conductor" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.268535 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3946aff4-5d46-4047-b17f-b92772d9477d" containerName="nova-cell1-conductor-conductor" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.269188 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.274511 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.279362 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.454016 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.454063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t25m\" (UniqueName: \"kubernetes.io/projected/6562624b-a1aa-4825-9f04-aaef8f125204-kube-api-access-7t25m\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.454129 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.557109 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.557159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t25m\" (UniqueName: \"kubernetes.io/projected/6562624b-a1aa-4825-9f04-aaef8f125204-kube-api-access-7t25m\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.557199 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.561349 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.561992 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.583218 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t25m\" (UniqueName: \"kubernetes.io/projected/6562624b-a1aa-4825-9f04-aaef8f125204-kube-api-access-7t25m\") pod \"nova-cell1-conductor-0\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.591659 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.603390 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:58:17 crc kubenswrapper[4884]: I1128 16:58:17.604255 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 16:58:18 crc kubenswrapper[4884]: I1128 16:58:18.083931 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:58:18 crc kubenswrapper[4884]: W1128 16:58:18.088641 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6562624b_a1aa_4825_9f04_aaef8f125204.slice/crio-142f8289bce49ef2e68029b468c804cadc28541531026971cac88384e4523e69 WatchSource:0}: Error finding container 142f8289bce49ef2e68029b468c804cadc28541531026971cac88384e4523e69: Status 404 returned error can't find the container with id 142f8289bce49ef2e68029b468c804cadc28541531026971cac88384e4523e69 Nov 28 16:58:18 crc kubenswrapper[4884]: I1128 16:58:18.209508 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6562624b-a1aa-4825-9f04-aaef8f125204","Type":"ContainerStarted","Data":"142f8289bce49ef2e68029b468c804cadc28541531026971cac88384e4523e69"} Nov 28 16:58:18 crc kubenswrapper[4884]: I1128 16:58:18.710856 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3946aff4-5d46-4047-b17f-b92772d9477d" path="/var/lib/kubelet/pods/3946aff4-5d46-4047-b17f-b92772d9477d/volumes" Nov 28 16:58:19 crc kubenswrapper[4884]: I1128 16:58:19.221978 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6562624b-a1aa-4825-9f04-aaef8f125204","Type":"ContainerStarted","Data":"e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb"} Nov 28 16:58:19 crc kubenswrapper[4884]: I1128 16:58:19.222359 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:19 crc kubenswrapper[4884]: I1128 16:58:19.244716 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.244688139 podStartE2EDuration="2.244688139s" podCreationTimestamp="2025-11-28 16:58:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:19.238161779 +0000 UTC m=+5938.800945590" watchObservedRunningTime="2025-11-28 16:58:19.244688139 +0000 UTC m=+5938.807471940" Nov 28 16:58:19 crc kubenswrapper[4884]: I1128 16:58:19.662123 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:58:19 crc kubenswrapper[4884]: I1128 16:58:19.677933 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:58:20 crc kubenswrapper[4884]: I1128 16:58:20.237708 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 16:58:21 crc kubenswrapper[4884]: I1128 16:58:21.430535 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 16:58:21 crc kubenswrapper[4884]: I1128 16:58:21.477578 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 16:58:22 crc kubenswrapper[4884]: I1128 16:58:22.283147 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 16:58:22 crc kubenswrapper[4884]: I1128 16:58:22.605934 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:58:22 crc kubenswrapper[4884]: I1128 16:58:22.605987 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 16:58:23 crc kubenswrapper[4884]: I1128 16:58:23.607706 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:58:23 crc kubenswrapper[4884]: I1128 16:58:23.608150 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 16:58:23 crc kubenswrapper[4884]: I1128 16:58:23.690318 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.81:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:58:23 crc kubenswrapper[4884]: I1128 16:58:23.690926 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.81:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:58:23 crc kubenswrapper[4884]: I1128 16:58:23.874923 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 16:58:24 crc kubenswrapper[4884]: I1128 16:58:24.649296 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.82:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:58:24 crc kubenswrapper[4884]: I1128 16:58:24.691314 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.82:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:58:25 crc kubenswrapper[4884]: I1128 16:58:25.689309 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:58:25 crc kubenswrapper[4884]: E1128 16:58:25.689518 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.749187 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.751064 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.752926 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.762912 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.941863 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.942469 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.942490 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.942509 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.942529 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9jll\" (UniqueName: \"kubernetes.io/projected/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-kube-api-access-j9jll\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:26 crc kubenswrapper[4884]: I1128 16:58:26.942558 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045084 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045338 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045392 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045433 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045468 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9jll\" (UniqueName: \"kubernetes.io/projected/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-kube-api-access-j9jll\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045521 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.045544 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.051411 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.051728 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.052130 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.052989 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" 
Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.082973 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9jll\" (UniqueName: \"kubernetes.io/projected/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-kube-api-access-j9jll\") pod \"cinder-scheduler-0\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") " pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.088629 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.586995 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 16:58:27 crc kubenswrapper[4884]: W1128 16:58:27.588031 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6fe47c6_1822_4b45_9a30_93cc3f66dd3d.slice/crio-fa1c234cce440606805020059f0cbac6de2d0039100696488300e82e8acf785b WatchSource:0}: Error finding container fa1c234cce440606805020059f0cbac6de2d0039100696488300e82e8acf785b: Status 404 returned error can't find the container with id fa1c234cce440606805020059f0cbac6de2d0039100696488300e82e8acf785b Nov 28 16:58:27 crc kubenswrapper[4884]: I1128 16:58:27.625033 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.162932 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.163391 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api-log" containerID="cri-o://eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b" gracePeriod=30 Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.163759 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api" containerID="cri-o://e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857" gracePeriod=30 Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.319699 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d","Type":"ContainerStarted","Data":"9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1"} Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.319738 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d","Type":"ContainerStarted","Data":"fa1c234cce440606805020059f0cbac6de2d0039100696488300e82e8acf785b"} Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.322019 4884 generic.go:334] "Generic (PLEG): container finished" podID="6766345a-0cd7-424c-9924-93a60a312625" containerID="eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b" exitCode=143 Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.322070 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6766345a-0cd7-424c-9924-93a60a312625","Type":"ContainerDied","Data":"eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b"} Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.752057 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/cinder-volume-volume1-0"] Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.754608 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.761452 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.768725 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.891311 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.891504 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.891620 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.891697 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.891765 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.891908 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892000 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892113 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: 
\"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-run\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892161 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8602ccbe-13e0-4f57-8794-1e2f86802ae1-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892319 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892367 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjbkf\" (UniqueName: \"kubernetes.io/projected/8602ccbe-13e0-4f57-8794-1e2f86802ae1-kube-api-access-hjbkf\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892407 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-dev\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-sys\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892531 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892594 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.892629 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995032 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995115 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjbkf\" (UniqueName: \"kubernetes.io/projected/8602ccbe-13e0-4f57-8794-1e2f86802ae1-kube-api-access-hjbkf\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995148 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-dev\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995177 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-sys\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995348 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995381 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995409 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" 
Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995434 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995459 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995494 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995523 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995552 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-run\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995581 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8602ccbe-13e0-4f57-8794-1e2f86802ae1-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.995786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.996019 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.996130 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.996312 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: 
\"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.996342 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-dev\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:28 crc kubenswrapper[4884]: I1128 16:58:28.996369 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-sys\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.000034 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.000061 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-run\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.000038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.000226 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/8602ccbe-13e0-4f57-8794-1e2f86802ae1-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.000620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.000886 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.001625 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8602ccbe-13e0-4f57-8794-1e2f86802ae1-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.001936 4884 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.003056 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8602ccbe-13e0-4f57-8794-1e2f86802ae1-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.018589 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjbkf\" (UniqueName: \"kubernetes.io/projected/8602ccbe-13e0-4f57-8794-1e2f86802ae1-kube-api-access-hjbkf\") pod \"cinder-volume-volume1-0\" (UID: \"8602ccbe-13e0-4f57-8794-1e2f86802ae1\") " pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.097898 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.333633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d","Type":"ContainerStarted","Data":"012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194"} Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.364047 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.36402454 podStartE2EDuration="3.36402454s" podCreationTimestamp="2025-11-28 16:58:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:29.355104302 +0000 UTC m=+5948.917888113" watchObservedRunningTime="2025-11-28 16:58:29.36402454 +0000 UTC m=+5948.926808341" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.413397 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.417328 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.431408 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.438217 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.506055 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-run\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.506135 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.506163 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcjcn\" (UniqueName: \"kubernetes.io/projected/9229318b-2501-439a-8422-4a2e8c837748-kube-api-access-qcjcn\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.506184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.506214 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-scripts\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.506992 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507058 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507401 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-lib-modules\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507554 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9229318b-2501-439a-8422-4a2e8c837748-ceph\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507826 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-dev\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507864 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507882 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-sys\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.507945 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-config-data\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.508056 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610319 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610363 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-lib-modules\") pod \"cinder-backup-0\" (UID: 
\"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610388 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9229318b-2501-439a-8422-4a2e8c837748-ceph\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610413 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-dev\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610428 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610442 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-sys\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610458 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-config-data\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610513 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610545 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-run\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610573 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610597 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcjcn\" (UniqueName: \"kubernetes.io/projected/9229318b-2501-439a-8422-4a2e8c837748-kube-api-access-qcjcn\") pod \"cinder-backup-0\" (UID: 
\"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610617 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-scripts\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610663 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.610681 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.611383 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-nvme\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.611512 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-run\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.611606 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.612329 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.612379 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-dev\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.613466 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-sys\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0" 
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.613466 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.613530 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.613550 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-lib-modules\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.613595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/9229318b-2501-439a-8422-4a2e8c837748-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.617970 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.618383 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/9229318b-2501-439a-8422-4a2e8c837748-ceph\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.619313 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-scripts\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.623389 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-config-data\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.623535 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"]
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.630720 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9229318b-2501-439a-8422-4a2e8c837748-config-data-custom\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.632179 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcjcn\" (UniqueName: \"kubernetes.io/projected/9229318b-2501-439a-8422-4a2e8c837748-kube-api-access-qcjcn\") pod \"cinder-backup-0\" (UID: \"9229318b-2501-439a-8422-4a2e8c837748\") " pod="openstack/cinder-backup-0"
Nov 28 16:58:29 crc kubenswrapper[4884]: I1128 16:58:29.748183 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0"
Nov 28 16:58:30 crc kubenswrapper[4884]: I1128 16:58:30.291550 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"]
Nov 28 16:58:30 crc kubenswrapper[4884]: I1128 16:58:30.343945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"8602ccbe-13e0-4f57-8794-1e2f86802ae1","Type":"ContainerStarted","Data":"307f9a836b6925c6980ab9b3712c4c4a05e43486fc592023fccac134bfa9d3c3"}
Nov 28 16:58:30 crc kubenswrapper[4884]: I1128 16:58:30.347696 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9229318b-2501-439a-8422-4a2e8c837748","Type":"ContainerStarted","Data":"ab967c3487aba4582bb2c1c0109e5768cff61328009804d8dd48439c3175077e"}
Nov 28 16:58:31 crc kubenswrapper[4884]: I1128 16:58:31.360929 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"8602ccbe-13e0-4f57-8794-1e2f86802ae1","Type":"ContainerStarted","Data":"a1d6da78db36165003b221ae3cd279df4ca62fc4f613283f87a6cf3cf314cd55"}
Nov 28 16:58:31 crc kubenswrapper[4884]: I1128 16:58:31.364297 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"8602ccbe-13e0-4f57-8794-1e2f86802ae1","Type":"ContainerStarted","Data":"ed7642fd7b9be5745728c3194b404d5b3d7b7af8f981a8073f1f87806d2b80bf"}
Nov 28 16:58:31 crc kubenswrapper[4884]: I1128 16:58:31.400466 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=2.47670834 podStartE2EDuration="3.400446965s" podCreationTimestamp="2025-11-28 16:58:28 +0000 UTC" firstStartedPulling="2025-11-28 16:58:29.632193243 +0000 UTC m=+5949.194977034" lastFinishedPulling="2025-11-28 16:58:30.555931868 +0000 UTC m=+5950.118715659" observedRunningTime="2025-11-28 16:58:31.389012135 +0000 UTC m=+5950.951795936" watchObservedRunningTime="2025-11-28 16:58:31.400446965 +0000 UTC m=+5950.963230766"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.037263 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.089665 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.166658 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6766345a-0cd7-424c-9924-93a60a312625-logs\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.166798 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htmfn\" (UniqueName: \"kubernetes.io/projected/6766345a-0cd7-424c-9924-93a60a312625-kube-api-access-htmfn\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.166837 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data-custom\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.166888 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-scripts\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.166942 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.166995 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-combined-ca-bundle\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.167073 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6766345a-0cd7-424c-9924-93a60a312625-etc-machine-id\") pod \"6766345a-0cd7-424c-9924-93a60a312625\" (UID: \"6766345a-0cd7-424c-9924-93a60a312625\") "
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.167422 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6766345a-0cd7-424c-9924-93a60a312625-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.168306 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6766345a-0cd7-424c-9924-93a60a312625-logs" (OuterVolumeSpecName: "logs") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.169918 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6766345a-0cd7-424c-9924-93a60a312625-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.169954 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6766345a-0cd7-424c-9924-93a60a312625-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.172040 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6766345a-0cd7-424c-9924-93a60a312625-kube-api-access-htmfn" (OuterVolumeSpecName: "kube-api-access-htmfn") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "kube-api-access-htmfn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.177293 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.178018 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-scripts" (OuterVolumeSpecName: "scripts") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.219111 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.222331 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data" (OuterVolumeSpecName: "config-data") pod "6766345a-0cd7-424c-9924-93a60a312625" (UID: "6766345a-0cd7-424c-9924-93a60a312625"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.285070 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htmfn\" (UniqueName: \"kubernetes.io/projected/6766345a-0cd7-424c-9924-93a60a312625-kube-api-access-htmfn\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.285172 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.285189 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.285249 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.285262 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6766345a-0cd7-424c-9924-93a60a312625-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.383332 4884 generic.go:334] "Generic (PLEG): container finished" podID="6766345a-0cd7-424c-9924-93a60a312625" containerID="e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857" exitCode=0
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.383421 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6766345a-0cd7-424c-9924-93a60a312625","Type":"ContainerDied","Data":"e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857"}
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.383471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6766345a-0cd7-424c-9924-93a60a312625","Type":"ContainerDied","Data":"6b9585b000ce05da37a5a8c38359d55619ae12eea4dc576afd18843c787058d5"}
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.383489 4884 scope.go:117] "RemoveContainer" containerID="e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.383438 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.398014 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9229318b-2501-439a-8422-4a2e8c837748","Type":"ContainerStarted","Data":"3848b0af9ecb8318a972fcafdad2f9e1a102b692d3c2717dfd786b83b6e7d476"}
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.398061 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"9229318b-2501-439a-8422-4a2e8c837748","Type":"ContainerStarted","Data":"011235f10dc6f466aeb94b4d5f546b40726eec54d708d6ef36a20ee7180f6c0d"}
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.430746 4884 scope.go:117] "RemoveContainer" containerID="eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.451908 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=2.552165727 podStartE2EDuration="3.451889925s" podCreationTimestamp="2025-11-28 16:58:29 +0000 UTC" firstStartedPulling="2025-11-28 16:58:30.291907717 +0000 UTC m=+5949.854691518" lastFinishedPulling="2025-11-28 16:58:31.191631905 +0000 UTC m=+5950.754415716" observedRunningTime="2025-11-28 16:58:32.431919175 +0000 UTC m=+5951.994702996" watchObservedRunningTime="2025-11-28 16:58:32.451889925 +0000 UTC m=+5952.014673726"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.464306 4884 scope.go:117] "RemoveContainer" containerID="e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857"
Nov 28 16:58:32 crc kubenswrapper[4884]: E1128 16:58:32.472614 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857\": container with ID starting with e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857 not found: ID does not exist" containerID="e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.472658 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857"} err="failed to get container status \"e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857\": rpc error: code = NotFound desc = could not find container \"e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857\": container with ID starting with e7ecdecef6d79a5e72dd6c1e60ada3fb06762949a335427de84e72a15ff27857 not found: ID does not exist"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.472688 4884 scope.go:117] "RemoveContainer" containerID="eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b"
Nov 28 16:58:32 crc kubenswrapper[4884]: E1128 16:58:32.475239 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b\": container with ID starting with eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b not found: ID does not exist" containerID="eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.475394 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b"} err="failed to get container status \"eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b\": rpc error: code = NotFound desc = could not find container \"eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b\": container with ID starting with eb177356daa4edcf927d6c7b3c55ea1dbaeb613b5841689f808d213c5b35df8b not found: ID does not exist"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.485784 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.511149 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.529153 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:58:32 crc kubenswrapper[4884]: E1128 16:58:32.529696 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.529717 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api"
Nov 28 16:58:32 crc kubenswrapper[4884]: E1128 16:58:32.529744 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api-log"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.529752 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api-log"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.530004 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api-log"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.530034 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.531168 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.533858 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.538834 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.606431 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.609032 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.609495 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.691861 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txrht\" (UniqueName: \"kubernetes.io/projected/d1ef97aa-68ff-487f-8f54-acb5071d9f03-kube-api-access-txrht\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.691943 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-scripts\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.691977 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-config-data\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.692305 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-config-data-custom\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.692344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ef97aa-68ff-487f-8f54-acb5071d9f03-logs\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.692391 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1ef97aa-68ff-487f-8f54-acb5071d9f03-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.692416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.700627 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6766345a-0cd7-424c-9924-93a60a312625" path="/var/lib/kubelet/pods/6766345a-0cd7-424c-9924-93a60a312625/volumes"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794324 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-config-data-custom\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794392 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ef97aa-68ff-487f-8f54-acb5071d9f03-logs\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794464 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1ef97aa-68ff-487f-8f54-acb5071d9f03-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794496 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794629 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d1ef97aa-68ff-487f-8f54-acb5071d9f03-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txrht\" (UniqueName: \"kubernetes.io/projected/d1ef97aa-68ff-487f-8f54-acb5071d9f03-kube-api-access-txrht\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794779 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-scripts\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.794828 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-config-data\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.795252 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1ef97aa-68ff-487f-8f54-acb5071d9f03-logs\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.799886 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-scripts\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.800159 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.809038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-config-data\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.812343 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1ef97aa-68ff-487f-8f54-acb5071d9f03-config-data-custom\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.819485 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txrht\" (UniqueName: \"kubernetes.io/projected/d1ef97aa-68ff-487f-8f54-acb5071d9f03-kube-api-access-txrht\") pod \"cinder-api-0\" (UID: \"d1ef97aa-68ff-487f-8f54-acb5071d9f03\") " pod="openstack/cinder-api-0"
Nov 28 16:58:32 crc kubenswrapper[4884]: I1128 16:58:32.848478 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.382005 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.406326 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d1ef97aa-68ff-487f-8f54-acb5071d9f03","Type":"ContainerStarted","Data":"573432d3d580e52c0e0bc90d82965f7242a3c5237a5dc17517ea9ef05fca3340"}
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.409368 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.612008 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.612362 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.612876 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 16:58:33 crc kubenswrapper[4884]: I1128 16:58:33.616551 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 16:58:34 crc kubenswrapper[4884]: I1128 16:58:34.098775 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0"
Nov 28 16:58:34 crc kubenswrapper[4884]: I1128 16:58:34.417998 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d1ef97aa-68ff-487f-8f54-acb5071d9f03","Type":"ContainerStarted","Data":"06d60ac97943b8e991a1c37cea544c3edd682b26e493a653e9f272175ce52e77"}
Nov 28 16:58:34 crc kubenswrapper[4884]: I1128 16:58:34.418047 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 16:58:34 crc kubenswrapper[4884]: I1128 16:58:34.422992 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 16:58:34 crc kubenswrapper[4884]: I1128 16:58:34.748795 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0"
Nov 28 16:58:35 crc kubenswrapper[4884]: I1128 16:58:35.428940 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d1ef97aa-68ff-487f-8f54-acb5071d9f03","Type":"ContainerStarted","Data":"880ac6276c541d0689566ee0fa86bce44b9f8e7aa2ad5bb9478d47580a1897bd"}
Nov 28 16:58:35 crc kubenswrapper[4884]: I1128 16:58:35.429048 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 16:58:35 crc kubenswrapper[4884]: I1128 16:58:35.475447 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.475424433 podStartE2EDuration="3.475424433s" podCreationTimestamp="2025-11-28 16:58:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:35.44664542 +0000 UTC m=+5955.009429221" watchObservedRunningTime="2025-11-28 16:58:35.475424433 +0000 UTC m=+5955.038208234"
Nov 28 16:58:36 crc kubenswrapper[4884]: I1128 16:58:36.688938 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:58:36 crc kubenswrapper[4884]: E1128 16:58:36.689423 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:58:37 crc kubenswrapper[4884]: I1128 16:58:37.009485 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="6766345a-0cd7-424c-9924-93a60a312625" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.1.78:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:58:37 crc kubenswrapper[4884]: I1128 16:58:37.312651 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:37 crc kubenswrapper[4884]: I1128 16:58:37.371509 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:58:37 crc kubenswrapper[4884]: I1128 16:58:37.456256 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="cinder-scheduler" containerID="cri-o://9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1" gracePeriod=30
Nov 28 16:58:37 crc kubenswrapper[4884]: I1128 16:58:37.456712 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="probe" containerID="cri-o://012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194" gracePeriod=30
Nov 28 16:58:38 crc kubenswrapper[4884]: I1128 16:58:38.470978 4884 generic.go:334] "Generic (PLEG): container finished" podID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerID="012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194" exitCode=0
Nov 28 16:58:38 crc kubenswrapper[4884]: I1128 16:58:38.471052 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d","Type":"ContainerDied","Data":"012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194"}
Nov 28 16:58:39 crc kubenswrapper[4884]: I1128 16:58:39.321195 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0"
Nov 28 16:58:39 crc kubenswrapper[4884]: I1128 16:58:39.923451 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.037268 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.041412 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9jll\" (UniqueName: \"kubernetes.io/projected/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-kube-api-access-j9jll\") pod \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") "
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.041527 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-combined-ca-bundle\") pod \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") "
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.041637 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data\") pod \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") "
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.041674 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-scripts\") pod \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") "
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.041759 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data-custom\") pod \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") "
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.041794 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-etc-machine-id\") pod \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\" (UID: \"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d\") "
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.042321 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" (UID: "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.047184 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" (UID: "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.047357 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-scripts" (OuterVolumeSpecName: "scripts") pod "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" (UID: "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.048280 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-kube-api-access-j9jll" (OuterVolumeSpecName: "kube-api-access-j9jll") pod "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" (UID: "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d"). InnerVolumeSpecName "kube-api-access-j9jll". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.097776 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" (UID: "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.145021 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9jll\" (UniqueName: \"kubernetes.io/projected/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-kube-api-access-j9jll\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.145408 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.145422 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.145435 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.145467 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.156389 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data" (OuterVolumeSpecName: "config-data") pod "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" (UID: "a6fe47c6-1822-4b45-9a30-93cc3f66dd3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.247138 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.497319 4884 generic.go:334] "Generic (PLEG): container finished" podID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerID="9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1" exitCode=0
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.497362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d","Type":"ContainerDied","Data":"9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1"}
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.497390 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"a6fe47c6-1822-4b45-9a30-93cc3f66dd3d","Type":"ContainerDied","Data":"fa1c234cce440606805020059f0cbac6de2d0039100696488300e82e8acf785b"}
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.497420 4884 scope.go:117] "RemoveContainer" containerID="012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.497423 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.525708 4884 scope.go:117] "RemoveContainer" containerID="9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.532388 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.540954 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.567217 4884 scope.go:117] "RemoveContainer" containerID="012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.570968 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:58:40 crc kubenswrapper[4884]: E1128 16:58:40.571500 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="probe"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.571531 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="probe"
Nov 28 16:58:40 crc kubenswrapper[4884]: E1128 16:58:40.571562 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="cinder-scheduler"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.571570 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="cinder-scheduler"
Nov 28 16:58:40 crc kubenswrapper[4884]: E1128 16:58:40.571647 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194\": container with ID starting with 012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194 not found: ID does not exist" containerID="012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.571690 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194"} err="failed to get container status \"012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194\": rpc error: code = NotFound desc = could not find container \"012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194\": container with ID starting with 012c69ce1aea87ff393767eaa9a4ebd718186b466e5b0c30602924157b964194 not found: ID does not exist"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.571722 4884 scope.go:117] "RemoveContainer" containerID="9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.571776 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="probe"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.571797 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" containerName="cinder-scheduler"
Nov 28 16:58:40 crc kubenswrapper[4884]: E1128 16:58:40.572387 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1\": container with ID starting with 9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1 not found: ID does not exist" containerID="9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.572418 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1"} err="failed to get container status \"9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1\": rpc error: code = NotFound desc = could not find container \"9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1\": container with ID starting with 9244d8eba8ede6a03d3ce08e6dc47b1cf865faa491ef84d63e1b5de441e77eb1 not found: ID does not exist"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.572899 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.575487 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.587479 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.657678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.657845 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/76a4dca2-e8f2-40eb-8918-61cb5e11db55-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.657932 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.658001 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-config-data\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.658024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nss2m\" (UniqueName: \"kubernetes.io/projected/76a4dca2-e8f2-40eb-8918-61cb5e11db55-kube-api-access-nss2m\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.658057 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-scripts\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.697410 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6fe47c6-1822-4b45-9a30-93cc3f66dd3d" path="/var/lib/kubelet/pods/a6fe47c6-1822-4b45-9a30-93cc3f66dd3d/volumes"
Nov 28 16:58:40 crc kubenswrapper[4884]: E1128 16:58:40.718670 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6fe47c6_1822_4b45_9a30_93cc3f66dd3d.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.761710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.762828 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/76a4dca2-e8f2-40eb-8918-61cb5e11db55-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.762900 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.762989 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/76a4dca2-e8f2-40eb-8918-61cb5e11db55-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.763011 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-config-data\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.763063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nss2m\" (UniqueName: \"kubernetes.io/projected/76a4dca2-e8f2-40eb-8918-61cb5e11db55-kube-api-access-nss2m\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.763126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-scripts\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.767798 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.769349 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-config-data\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.780459 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-scripts\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.780900 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/76a4dca2-e8f2-40eb-8918-61cb5e11db55-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.784449 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nss2m\" (UniqueName: \"kubernetes.io/projected/76a4dca2-e8f2-40eb-8918-61cb5e11db55-kube-api-access-nss2m\") pod \"cinder-scheduler-0\" (UID: \"76a4dca2-e8f2-40eb-8918-61cb5e11db55\") " pod="openstack/cinder-scheduler-0"
Nov 28 16:58:40 crc kubenswrapper[4884]: I1128 16:58:40.896648 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:41 crc kubenswrapper[4884]: I1128 16:58:41.365977 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 16:58:41 crc kubenswrapper[4884]: W1128 16:58:41.369694 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76a4dca2_e8f2_40eb_8918_61cb5e11db55.slice/crio-656d65c939ab940e5d9ab419f679dfb7e42915c8e13d80bfc7b6b84064aa714d WatchSource:0}: Error finding container 656d65c939ab940e5d9ab419f679dfb7e42915c8e13d80bfc7b6b84064aa714d: Status 404 returned error can't find the container with id 656d65c939ab940e5d9ab419f679dfb7e42915c8e13d80bfc7b6b84064aa714d
Nov 28 16:58:41 crc kubenswrapper[4884]: I1128 16:58:41.510500 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"76a4dca2-e8f2-40eb-8918-61cb5e11db55","Type":"ContainerStarted","Data":"656d65c939ab940e5d9ab419f679dfb7e42915c8e13d80bfc7b6b84064aa714d"}
Nov 28 16:58:42 crc kubenswrapper[4884]: I1128 16:58:42.524696 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"76a4dca2-e8f2-40eb-8918-61cb5e11db55","Type":"ContainerStarted","Data":"0d1139d6b7ff11ebcc6e032e20aaca23bea324615ab3b13912273ecdecfb7c75"}
Nov 28 16:58:43 crc kubenswrapper[4884]: I1128 16:58:43.547693 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"76a4dca2-e8f2-40eb-8918-61cb5e11db55","Type":"ContainerStarted","Data":"86e1263cf255a4e42eba97ebc5c39e5273d02179ad5e71c25810dbee61a3e5ea"}
Nov 28 16:58:43 crc kubenswrapper[4884]: I1128 16:58:43.578746 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.578723461 podStartE2EDuration="3.578723461s" podCreationTimestamp="2025-11-28 16:58:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:58:43.572473678 +0000 UTC m=+5963.135257489" watchObservedRunningTime="2025-11-28 16:58:43.578723461 +0000 UTC m=+5963.141507262"
Nov 28 16:58:44 crc kubenswrapper[4884]: I1128 16:58:44.722048 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 16:58:45 crc kubenswrapper[4884]: I1128 16:58:45.897836 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 16:58:49 crc kubenswrapper[4884]: I1128 16:58:49.690499 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:58:49 crc kubenswrapper[4884]: E1128 16:58:49.691551 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:58:51 crc kubenswrapper[4884]: I1128 16:58:51.096527 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 28 16:59:04 crc kubenswrapper[4884]: I1128 16:59:04.688499 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:59:04 crc kubenswrapper[4884]: E1128 16:59:04.689652 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:59:16 crc kubenswrapper[4884]: I1128 16:59:16.690025 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:59:16 crc kubenswrapper[4884]: E1128 16:59:16.691223 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:59:24 crc kubenswrapper[4884]: I1128 16:59:24.061862 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-v29hs"]
Nov 28 16:59:24 crc kubenswrapper[4884]: I1128 16:59:24.072320 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-v29hs"]
Nov 28 16:59:24 crc kubenswrapper[4884]: I1128 16:59:24.700869 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa482a01-e2e1-4d61-a6a9-d09030c4bee4" path="/var/lib/kubelet/pods/aa482a01-e2e1-4d61-a6a9-d09030c4bee4/volumes"
Nov 28 16:59:29 crc kubenswrapper[4884]: I1128 16:59:29.688441 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027"
Nov 28 16:59:29 crc kubenswrapper[4884]: E1128 16:59:29.689532 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 16:59:30 crc kubenswrapper[4884]: I1128 16:59:30.776283 4884 scope.go:117] "RemoveContainer" containerID="8bf0d67d1f98131a4193859b2c17646e91a8bfd176f2fb315a6861840a0d09fb"
Nov 28 16:59:34 crc kubenswrapper[4884]: I1128 16:59:34.036969 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-cda9-account-create-bddqz"]
Nov 28 16:59:34 crc kubenswrapper[4884]: I1128 16:59:34.046552 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-cda9-account-create-bddqz"] Nov 28 16:59:34 crc kubenswrapper[4884]: I1128 16:59:34.699119 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="572b831f-b7ee-4e63-b872-601d5deae7a9" path="/var/lib/kubelet/pods/572b831f-b7ee-4e63-b872-601d5deae7a9/volumes" Nov 28 16:59:40 crc kubenswrapper[4884]: I1128 16:59:40.696931 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:59:40 crc kubenswrapper[4884]: E1128 16:59:40.697746 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:59:41 crc kubenswrapper[4884]: I1128 16:59:41.033249 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-86px5"] Nov 28 16:59:41 crc kubenswrapper[4884]: I1128 16:59:41.042079 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-86px5"] Nov 28 16:59:42 crc kubenswrapper[4884]: I1128 16:59:42.710360 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fad256f3-aa0a-4981-b19d-606902f94277" path="/var/lib/kubelet/pods/fad256f3-aa0a-4981-b19d-606902f94277/volumes" Nov 28 16:59:54 crc kubenswrapper[4884]: I1128 16:59:54.045292 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-cgz8w"] Nov 28 16:59:54 crc kubenswrapper[4884]: I1128 16:59:54.053620 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-cgz8w"] Nov 28 16:59:54 crc kubenswrapper[4884]: I1128 16:59:54.687974 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 16:59:54 crc kubenswrapper[4884]: E1128 16:59:54.688366 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 16:59:54 crc kubenswrapper[4884]: I1128 16:59:54.699966 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10baaa35-44ec-4071-b281-56d439579fdd" path="/var/lib/kubelet/pods/10baaa35-44ec-4071-b281-56d439579fdd/volumes" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.163483 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9"] Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.169680 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.172312 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.172340 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.181978 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9"] Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.264858 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-secret-volume\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.264919 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnlv5\" (UniqueName: \"kubernetes.io/projected/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-kube-api-access-dnlv5\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.264950 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-config-volume\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.366716 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-secret-volume\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.366768 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnlv5\" (UniqueName: \"kubernetes.io/projected/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-kube-api-access-dnlv5\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.366807 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-config-volume\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.367710 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-config-volume\") pod 
\"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.376198 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-secret-volume\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.388276 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnlv5\" (UniqueName: \"kubernetes.io/projected/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-kube-api-access-dnlv5\") pod \"collect-profiles-29405820-pt8g9\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.498401 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:00 crc kubenswrapper[4884]: I1128 17:00:00.947997 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9"] Nov 28 17:00:01 crc kubenswrapper[4884]: I1128 17:00:01.428714 4884 generic.go:334] "Generic (PLEG): container finished" podID="fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" containerID="f2d9cf2a2ef20bdd0713da8fdf5572fe8ee35bd323fd16d8dfb39b5446ecba80" exitCode=0 Nov 28 17:00:01 crc kubenswrapper[4884]: I1128 17:00:01.429017 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" event={"ID":"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17","Type":"ContainerDied","Data":"f2d9cf2a2ef20bdd0713da8fdf5572fe8ee35bd323fd16d8dfb39b5446ecba80"} Nov 28 17:00:01 crc kubenswrapper[4884]: I1128 17:00:01.429041 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" event={"ID":"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17","Type":"ContainerStarted","Data":"ed583754f1d4f44f5ac3e9cb9ff86fa4cb6e0006fc22ca57f1c787f9d64478d2"} Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.797563 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.920229 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnlv5\" (UniqueName: \"kubernetes.io/projected/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-kube-api-access-dnlv5\") pod \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.920406 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-secret-volume\") pod \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.920537 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-config-volume\") pod \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\" (UID: \"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17\") " Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.921738 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-config-volume" (OuterVolumeSpecName: "config-volume") pod "fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" (UID: "fa3fe1bb-473c-4cd0-9e1b-f684b7926b17"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.926577 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" (UID: "fa3fe1bb-473c-4cd0-9e1b-f684b7926b17"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:00:02 crc kubenswrapper[4884]: I1128 17:00:02.927097 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-kube-api-access-dnlv5" (OuterVolumeSpecName: "kube-api-access-dnlv5") pod "fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" (UID: "fa3fe1bb-473c-4cd0-9e1b-f684b7926b17"). InnerVolumeSpecName "kube-api-access-dnlv5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.022878 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnlv5\" (UniqueName: \"kubernetes.io/projected/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-kube-api-access-dnlv5\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.022926 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.022945 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.449843 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" event={"ID":"fa3fe1bb-473c-4cd0-9e1b-f684b7926b17","Type":"ContainerDied","Data":"ed583754f1d4f44f5ac3e9cb9ff86fa4cb6e0006fc22ca57f1c787f9d64478d2"} Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.449892 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed583754f1d4f44f5ac3e9cb9ff86fa4cb6e0006fc22ca57f1c787f9d64478d2" Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.449967 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9" Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.889981 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"] Nov 28 17:00:03 crc kubenswrapper[4884]: I1128 17:00:03.901003 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-ncld8"] Nov 28 17:00:04 crc kubenswrapper[4884]: I1128 17:00:04.702629 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3498621-3413-4663-9414-4190355ab301" path="/var/lib/kubelet/pods/a3498621-3413-4663-9414-4190355ab301/volumes" Nov 28 17:00:08 crc kubenswrapper[4884]: I1128 17:00:08.688670 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 17:00:08 crc kubenswrapper[4884]: E1128 17:00:08.689296 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:00:21 crc kubenswrapper[4884]: I1128 17:00:21.688633 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 17:00:21 crc kubenswrapper[4884]: E1128 17:00:21.689488 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:00:30 crc kubenswrapper[4884]: I1128 17:00:30.878304 4884 scope.go:117] "RemoveContainer" containerID="492799a792c6f741b01657a75d3865db8d1fbbc5aef6adb1a1124fda2323cc3e" Nov 28 17:00:30 crc kubenswrapper[4884]: I1128 17:00:30.904454 4884 scope.go:117] "RemoveContainer" containerID="d58c8275d5e5c8c84259e6341fe42c69a9879918ff5a0eaff1135ef448e0409f" Nov 28 17:00:30 crc kubenswrapper[4884]: I1128 17:00:30.961712 4884 scope.go:117] "RemoveContainer" containerID="d1908efe993064e9e087d0b2c11cf0dc2368fef0dbac6fba4aab53ec4ac36a99" Nov 28 17:00:30 crc kubenswrapper[4884]: I1128 17:00:30.993398 4884 scope.go:117] "RemoveContainer" containerID="5d4850d5fa26952a7ae433ccb47a014d8022bacda2faa956cf7977b908eded76" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.074416 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-lcdfn"] Nov 28 17:00:31 crc kubenswrapper[4884]: E1128 17:00:31.075124 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" containerName="collect-profiles" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.075142 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" containerName="collect-profiles" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.075347 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" containerName="collect-profiles" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.076021 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.078645 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.078903 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-qhqkd" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.086784 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-lcdfn"] Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.106198 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-xkdfq"] Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.109039 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.124412 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-xkdfq"] Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210359 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-log-ovn\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210403 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-run\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210425 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-scripts\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210445 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-etc-ovs\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210620 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-log\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210743 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-run-ovn\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210763 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d86993-2f86-4069-aff5-123cfba6f2a6-scripts\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210812 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkl5w\" (UniqueName: \"kubernetes.io/projected/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-kube-api-access-fkl5w\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210838 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: 
\"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-lib\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.210923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-run\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.211024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkwtf\" (UniqueName: \"kubernetes.io/projected/a5d86993-2f86-4069-aff5-123cfba6f2a6-kube-api-access-pkwtf\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312259 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-log-ovn\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312308 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-run\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312329 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-scripts\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-etc-ovs\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312367 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-log\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-run-ovn\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312421 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d86993-2f86-4069-aff5-123cfba6f2a6-scripts\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 
28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312445 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkl5w\" (UniqueName: \"kubernetes.io/projected/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-kube-api-access-fkl5w\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312463 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-lib\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312504 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-run\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.312546 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkwtf\" (UniqueName: \"kubernetes.io/projected/a5d86993-2f86-4069-aff5-123cfba6f2a6-kube-api-access-pkwtf\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313043 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-log-ovn\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313044 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-run-ovn\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313123 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-etc-ovs\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313043 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-run\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-lib\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313200 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-var-run\") pod 
\"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.313662 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a5d86993-2f86-4069-aff5-123cfba6f2a6-var-log\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.314960 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5d86993-2f86-4069-aff5-123cfba6f2a6-scripts\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.315013 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-scripts\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.331348 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkwtf\" (UniqueName: \"kubernetes.io/projected/a5d86993-2f86-4069-aff5-123cfba6f2a6-kube-api-access-pkwtf\") pod \"ovn-controller-ovs-xkdfq\" (UID: \"a5d86993-2f86-4069-aff5-123cfba6f2a6\") " pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.331982 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkl5w\" (UniqueName: \"kubernetes.io/projected/7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa-kube-api-access-fkl5w\") pod \"ovn-controller-lcdfn\" (UID: \"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa\") " pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.405368 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.453825 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:31 crc kubenswrapper[4884]: I1128 17:00:31.843900 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-lcdfn"] Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.536496 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-xkdfq"] Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.688516 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 17:00:32 crc kubenswrapper[4884]: E1128 17:00:32.688760 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.754152 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-lcdfn" event={"ID":"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa","Type":"ContainerStarted","Data":"1feefd18e4c3b0f7622abca19889459aa8b58b3be84ae43d2ed3316874edefc4"} Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.754207 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-lcdfn" event={"ID":"7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa","Type":"ContainerStarted","Data":"6bf8a9d039116ae93ee91a88eee1c0098f05ff247fd665be22b0eb1c9eb4430b"} Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.754863 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-lcdfn" Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.756822 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xkdfq" event={"ID":"a5d86993-2f86-4069-aff5-123cfba6f2a6","Type":"ContainerStarted","Data":"88003073cd210ee596a8e78c31bf66849b3c3c78f3953243b19b587c600d8160"} Nov 28 17:00:32 crc kubenswrapper[4884]: I1128 17:00:32.774045 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-lcdfn" podStartSLOduration=1.774025368 podStartE2EDuration="1.774025368s" podCreationTimestamp="2025-11-28 17:00:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:00:32.769846316 +0000 UTC m=+6072.332630137" watchObservedRunningTime="2025-11-28 17:00:32.774025368 +0000 UTC m=+6072.336809169" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.769919 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-2wx5g"] Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.771712 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.774261 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.774308 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5d86993-2f86-4069-aff5-123cfba6f2a6" containerID="8f519f71b5b53ba94d47d332abb72fe8ffbc497300513d91aa80da69ecf70901" exitCode=0 Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.774463 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xkdfq" event={"ID":"a5d86993-2f86-4069-aff5-123cfba6f2a6","Type":"ContainerDied","Data":"8f519f71b5b53ba94d47d332abb72fe8ffbc497300513d91aa80da69ecf70901"} Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.789725 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-2wx5g"] Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.847984 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-975xp"] Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.849587 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-975xp" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.856080 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-975xp"] Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.879352 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/29a42ba8-dba1-4527-86bb-3d6b53a008c8-ovn-rundir\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.879441 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a42ba8-dba1-4527-86bb-3d6b53a008c8-config\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.879518 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx6f7\" (UniqueName: \"kubernetes.io/projected/29a42ba8-dba1-4527-86bb-3d6b53a008c8-kube-api-access-sx6f7\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.879649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/29a42ba8-dba1-4527-86bb-3d6b53a008c8-ovs-rundir\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985048 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/29a42ba8-dba1-4527-86bb-3d6b53a008c8-ovs-rundir\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985220 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kj476\" (UniqueName: \"kubernetes.io/projected/ed25f143-e5ff-44d0-8715-0af6095f5c7d-kube-api-access-kj476\") pod \"octavia-db-create-975xp\" (UID: \"ed25f143-e5ff-44d0-8715-0af6095f5c7d\") " pod="openstack/octavia-db-create-975xp" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985249 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/29a42ba8-dba1-4527-86bb-3d6b53a008c8-ovn-rundir\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985299 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a42ba8-dba1-4527-86bb-3d6b53a008c8-config\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985355 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx6f7\" (UniqueName: \"kubernetes.io/projected/29a42ba8-dba1-4527-86bb-3d6b53a008c8-kube-api-access-sx6f7\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/29a42ba8-dba1-4527-86bb-3d6b53a008c8-ovs-rundir\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.985487 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/29a42ba8-dba1-4527-86bb-3d6b53a008c8-ovn-rundir\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:33 crc kubenswrapper[4884]: I1128 17:00:33.986277 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29a42ba8-dba1-4527-86bb-3d6b53a008c8-config\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.008494 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx6f7\" (UniqueName: \"kubernetes.io/projected/29a42ba8-dba1-4527-86bb-3d6b53a008c8-kube-api-access-sx6f7\") pod \"ovn-controller-metrics-2wx5g\" (UID: \"29a42ba8-dba1-4527-86bb-3d6b53a008c8\") " pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.087318 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kj476\" (UniqueName: \"kubernetes.io/projected/ed25f143-e5ff-44d0-8715-0af6095f5c7d-kube-api-access-kj476\") pod \"octavia-db-create-975xp\" (UID: \"ed25f143-e5ff-44d0-8715-0af6095f5c7d\") " pod="openstack/octavia-db-create-975xp" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.096038 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-2wx5g" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.123675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kj476\" (UniqueName: \"kubernetes.io/projected/ed25f143-e5ff-44d0-8715-0af6095f5c7d-kube-api-access-kj476\") pod \"octavia-db-create-975xp\" (UID: \"ed25f143-e5ff-44d0-8715-0af6095f5c7d\") " pod="openstack/octavia-db-create-975xp" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.213406 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-975xp" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.568642 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-975xp"] Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.582932 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-2wx5g"] Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.826279 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-2wx5g" event={"ID":"29a42ba8-dba1-4527-86bb-3d6b53a008c8","Type":"ContainerStarted","Data":"0a83f88698479de37832c74b80d84ed05ff6512ce1f7fd2f9b0722a3474cba97"} Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.833033 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-975xp" event={"ID":"ed25f143-e5ff-44d0-8715-0af6095f5c7d","Type":"ContainerStarted","Data":"375f2974e421fa89ede320d2abd9dac7e8dcafffb16d80a2dbd9c28f715f44c5"} Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.853863 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xkdfq" event={"ID":"a5d86993-2f86-4069-aff5-123cfba6f2a6","Type":"ContainerStarted","Data":"0593903af1cf6be3e9e67b7207a0ea0a08a7f80f0784491e9e55205af2c75101"} Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.853913 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xkdfq" event={"ID":"a5d86993-2f86-4069-aff5-123cfba6f2a6","Type":"ContainerStarted","Data":"7d433b056ff84feae373067af50b3203ccf2805e05198c7e30644161f7590c11"} Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.854659 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.854782 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:00:34 crc kubenswrapper[4884]: I1128 17:00:34.887047 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-xkdfq" podStartSLOduration=3.8870271560000003 podStartE2EDuration="3.887027156s" podCreationTimestamp="2025-11-28 17:00:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:00:34.87611367 +0000 UTC m=+6074.438897481" watchObservedRunningTime="2025-11-28 17:00:34.887027156 +0000 UTC m=+6074.449810957" Nov 28 17:00:35 crc kubenswrapper[4884]: I1128 17:00:35.866363 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-2wx5g" event={"ID":"29a42ba8-dba1-4527-86bb-3d6b53a008c8","Type":"ContainerStarted","Data":"ec2a60dbb195a9fc159b5d90cfbf084a1f4d11f7960853dae6a39a8c43df31e5"} Nov 28 17:00:35 crc kubenswrapper[4884]: I1128 17:00:35.871004 4884 generic.go:334] "Generic (PLEG): container 
finished" podID="ed25f143-e5ff-44d0-8715-0af6095f5c7d" containerID="ed8f3bc9fa5b195bd801d5e16c15498073c6f97e53946a74b707d0f6d12b49ef" exitCode=0 Nov 28 17:00:35 crc kubenswrapper[4884]: I1128 17:00:35.871058 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-975xp" event={"ID":"ed25f143-e5ff-44d0-8715-0af6095f5c7d","Type":"ContainerDied","Data":"ed8f3bc9fa5b195bd801d5e16c15498073c6f97e53946a74b707d0f6d12b49ef"} Nov 28 17:00:35 crc kubenswrapper[4884]: I1128 17:00:35.885407 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-2wx5g" podStartSLOduration=2.885381878 podStartE2EDuration="2.885381878s" podCreationTimestamp="2025-11-28 17:00:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:00:35.87853316 +0000 UTC m=+6075.441316971" watchObservedRunningTime="2025-11-28 17:00:35.885381878 +0000 UTC m=+6075.448165679" Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.220217 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-975xp" Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.373564 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kj476\" (UniqueName: \"kubernetes.io/projected/ed25f143-e5ff-44d0-8715-0af6095f5c7d-kube-api-access-kj476\") pod \"ed25f143-e5ff-44d0-8715-0af6095f5c7d\" (UID: \"ed25f143-e5ff-44d0-8715-0af6095f5c7d\") " Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.379979 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed25f143-e5ff-44d0-8715-0af6095f5c7d-kube-api-access-kj476" (OuterVolumeSpecName: "kube-api-access-kj476") pod "ed25f143-e5ff-44d0-8715-0af6095f5c7d" (UID: "ed25f143-e5ff-44d0-8715-0af6095f5c7d"). InnerVolumeSpecName "kube-api-access-kj476". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.476245 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kj476\" (UniqueName: \"kubernetes.io/projected/ed25f143-e5ff-44d0-8715-0af6095f5c7d-kube-api-access-kj476\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.895337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-975xp" event={"ID":"ed25f143-e5ff-44d0-8715-0af6095f5c7d","Type":"ContainerDied","Data":"375f2974e421fa89ede320d2abd9dac7e8dcafffb16d80a2dbd9c28f715f44c5"} Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.895608 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="375f2974e421fa89ede320d2abd9dac7e8dcafffb16d80a2dbd9c28f715f44c5" Nov 28 17:00:37 crc kubenswrapper[4884]: I1128 17:00:37.895432 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-975xp" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.052652 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-f459-account-create-jzhvp"] Nov 28 17:00:45 crc kubenswrapper[4884]: E1128 17:00:45.054945 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed25f143-e5ff-44d0-8715-0af6095f5c7d" containerName="mariadb-database-create" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.054967 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed25f143-e5ff-44d0-8715-0af6095f5c7d" containerName="mariadb-database-create" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.055219 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed25f143-e5ff-44d0-8715-0af6095f5c7d" containerName="mariadb-database-create" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.056151 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.057933 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.066870 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-f459-account-create-jzhvp"] Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.223347 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx6t2\" (UniqueName: \"kubernetes.io/projected/da7f06c9-1e63-4a56-bbf3-ff053724a26c-kube-api-access-vx6t2\") pod \"octavia-f459-account-create-jzhvp\" (UID: \"da7f06c9-1e63-4a56-bbf3-ff053724a26c\") " pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.271326 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4swq4"] Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.278019 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.285749 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4swq4"] Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.324939 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx6t2\" (UniqueName: \"kubernetes.io/projected/da7f06c9-1e63-4a56-bbf3-ff053724a26c-kube-api-access-vx6t2\") pod \"octavia-f459-account-create-jzhvp\" (UID: \"da7f06c9-1e63-4a56-bbf3-ff053724a26c\") " pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.344131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx6t2\" (UniqueName: \"kubernetes.io/projected/da7f06c9-1e63-4a56-bbf3-ff053724a26c-kube-api-access-vx6t2\") pod \"octavia-f459-account-create-jzhvp\" (UID: \"da7f06c9-1e63-4a56-bbf3-ff053724a26c\") " pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.382997 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.426829 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-utilities\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.426942 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-catalog-content\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.427120 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wsrk\" (UniqueName: \"kubernetes.io/projected/20b94db2-a107-4fed-b403-07a34cb1b89c-kube-api-access-4wsrk\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.528712 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wsrk\" (UniqueName: \"kubernetes.io/projected/20b94db2-a107-4fed-b403-07a34cb1b89c-kube-api-access-4wsrk\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.528813 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-utilities\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.528866 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-catalog-content\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.529363 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-catalog-content\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.530426 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-utilities\") pod \"redhat-operators-4swq4\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.551306 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wsrk\" (UniqueName: \"kubernetes.io/projected/20b94db2-a107-4fed-b403-07a34cb1b89c-kube-api-access-4wsrk\") pod \"redhat-operators-4swq4\" (UID: 
\"20b94db2-a107-4fed-b403-07a34cb1b89c\") " pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.626108 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.852987 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-f459-account-create-jzhvp"] Nov 28 17:00:45 crc kubenswrapper[4884]: I1128 17:00:45.972140 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-f459-account-create-jzhvp" event={"ID":"da7f06c9-1e63-4a56-bbf3-ff053724a26c","Type":"ContainerStarted","Data":"445cc40aa9f0830b69df3253391fba4b71a652f45b744312e9262a0ab2808d92"} Nov 28 17:00:46 crc kubenswrapper[4884]: I1128 17:00:46.093295 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4swq4"] Nov 28 17:00:46 crc kubenswrapper[4884]: W1128 17:00:46.103665 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20b94db2_a107_4fed_b403_07a34cb1b89c.slice/crio-6b9cf3c42dd0cfdc7b30414f583ba5bf3f48ba1683999b16b43d0395268f33d1 WatchSource:0}: Error finding container 6b9cf3c42dd0cfdc7b30414f583ba5bf3f48ba1683999b16b43d0395268f33d1: Status 404 returned error can't find the container with id 6b9cf3c42dd0cfdc7b30414f583ba5bf3f48ba1683999b16b43d0395268f33d1 Nov 28 17:00:46 crc kubenswrapper[4884]: I1128 17:00:46.981288 4884 generic.go:334] "Generic (PLEG): container finished" podID="da7f06c9-1e63-4a56-bbf3-ff053724a26c" containerID="d9d32694c2d6d5cbe7a5b5e859922edb55ca1b7a16c71d0f42d85cd5b0ae266f" exitCode=0 Nov 28 17:00:46 crc kubenswrapper[4884]: I1128 17:00:46.981372 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-f459-account-create-jzhvp" event={"ID":"da7f06c9-1e63-4a56-bbf3-ff053724a26c","Type":"ContainerDied","Data":"d9d32694c2d6d5cbe7a5b5e859922edb55ca1b7a16c71d0f42d85cd5b0ae266f"} Nov 28 17:00:46 crc kubenswrapper[4884]: I1128 17:00:46.983652 4884 generic.go:334] "Generic (PLEG): container finished" podID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerID="d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed" exitCode=0 Nov 28 17:00:46 crc kubenswrapper[4884]: I1128 17:00:46.983687 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerDied","Data":"d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed"} Nov 28 17:00:46 crc kubenswrapper[4884]: I1128 17:00:46.983703 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerStarted","Data":"6b9cf3c42dd0cfdc7b30414f583ba5bf3f48ba1683999b16b43d0395268f33d1"} Nov 28 17:00:47 crc kubenswrapper[4884]: I1128 17:00:47.689346 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 17:00:47 crc kubenswrapper[4884]: E1128 17:00:47.689593 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:00:48 crc kubenswrapper[4884]: I1128 17:00:48.352521 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:48 crc kubenswrapper[4884]: I1128 17:00:48.390639 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx6t2\" (UniqueName: \"kubernetes.io/projected/da7f06c9-1e63-4a56-bbf3-ff053724a26c-kube-api-access-vx6t2\") pod \"da7f06c9-1e63-4a56-bbf3-ff053724a26c\" (UID: \"da7f06c9-1e63-4a56-bbf3-ff053724a26c\") " Nov 28 17:00:48 crc kubenswrapper[4884]: I1128 17:00:48.396726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da7f06c9-1e63-4a56-bbf3-ff053724a26c-kube-api-access-vx6t2" (OuterVolumeSpecName: "kube-api-access-vx6t2") pod "da7f06c9-1e63-4a56-bbf3-ff053724a26c" (UID: "da7f06c9-1e63-4a56-bbf3-ff053724a26c"). InnerVolumeSpecName "kube-api-access-vx6t2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:48 crc kubenswrapper[4884]: I1128 17:00:48.496746 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx6t2\" (UniqueName: \"kubernetes.io/projected/da7f06c9-1e63-4a56-bbf3-ff053724a26c-kube-api-access-vx6t2\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:49 crc kubenswrapper[4884]: I1128 17:00:49.001763 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-f459-account-create-jzhvp" event={"ID":"da7f06c9-1e63-4a56-bbf3-ff053724a26c","Type":"ContainerDied","Data":"445cc40aa9f0830b69df3253391fba4b71a652f45b744312e9262a0ab2808d92"} Nov 28 17:00:49 crc kubenswrapper[4884]: I1128 17:00:49.001964 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="445cc40aa9f0830b69df3253391fba4b71a652f45b744312e9262a0ab2808d92" Nov 28 17:00:49 crc kubenswrapper[4884]: I1128 17:00:49.001839 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-f459-account-create-jzhvp" Nov 28 17:00:49 crc kubenswrapper[4884]: I1128 17:00:49.003575 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerStarted","Data":"b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f"} Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.026957 4884 generic.go:334] "Generic (PLEG): container finished" podID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerID="b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f" exitCode=0 Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.027039 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerDied","Data":"b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f"} Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.348376 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-npx2k"] Nov 28 17:00:51 crc kubenswrapper[4884]: E1128 17:00:51.348739 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da7f06c9-1e63-4a56-bbf3-ff053724a26c" containerName="mariadb-account-create" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.348750 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="da7f06c9-1e63-4a56-bbf3-ff053724a26c" containerName="mariadb-account-create" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.348939 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="da7f06c9-1e63-4a56-bbf3-ff053724a26c" containerName="mariadb-account-create" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.349548 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.369623 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-npx2k"] Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.474903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpksh\" (UniqueName: \"kubernetes.io/projected/c6eebfb8-df42-4119-8ee0-88efd240e6b3-kube-api-access-tpksh\") pod \"octavia-persistence-db-create-npx2k\" (UID: \"c6eebfb8-df42-4119-8ee0-88efd240e6b3\") " pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.576744 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpksh\" (UniqueName: \"kubernetes.io/projected/c6eebfb8-df42-4119-8ee0-88efd240e6b3-kube-api-access-tpksh\") pod \"octavia-persistence-db-create-npx2k\" (UID: \"c6eebfb8-df42-4119-8ee0-88efd240e6b3\") " pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.596131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpksh\" (UniqueName: \"kubernetes.io/projected/c6eebfb8-df42-4119-8ee0-88efd240e6b3-kube-api-access-tpksh\") pod \"octavia-persistence-db-create-npx2k\" (UID: \"c6eebfb8-df42-4119-8ee0-88efd240e6b3\") " pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:51 crc kubenswrapper[4884]: I1128 17:00:51.667225 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:52 crc kubenswrapper[4884]: I1128 17:00:52.037213 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerStarted","Data":"85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239"} Nov 28 17:00:52 crc kubenswrapper[4884]: I1128 17:00:52.061138 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4swq4" podStartSLOduration=2.397652757 podStartE2EDuration="7.061120047s" podCreationTimestamp="2025-11-28 17:00:45 +0000 UTC" firstStartedPulling="2025-11-28 17:00:46.986789093 +0000 UTC m=+6086.549572894" lastFinishedPulling="2025-11-28 17:00:51.650256383 +0000 UTC m=+6091.213040184" observedRunningTime="2025-11-28 17:00:52.054614119 +0000 UTC m=+6091.617397920" watchObservedRunningTime="2025-11-28 17:00:52.061120047 +0000 UTC m=+6091.623903848" Nov 28 17:00:52 crc kubenswrapper[4884]: I1128 17:00:52.136534 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-npx2k"] Nov 28 17:00:53 crc kubenswrapper[4884]: I1128 17:00:53.045638 4884 generic.go:334] "Generic (PLEG): container finished" podID="c6eebfb8-df42-4119-8ee0-88efd240e6b3" containerID="c5d0225a6c5c13b6626910e0e3d7cc5972c90921ccc764ad44a54f669a93537a" exitCode=0 Nov 28 17:00:53 crc kubenswrapper[4884]: I1128 17:00:53.045875 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-npx2k" event={"ID":"c6eebfb8-df42-4119-8ee0-88efd240e6b3","Type":"ContainerDied","Data":"c5d0225a6c5c13b6626910e0e3d7cc5972c90921ccc764ad44a54f669a93537a"} Nov 28 17:00:53 crc kubenswrapper[4884]: I1128 17:00:53.045898 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-npx2k" event={"ID":"c6eebfb8-df42-4119-8ee0-88efd240e6b3","Type":"ContainerStarted","Data":"8507cdcb81c45e3b59907f2e93bcffb0219117e05b67edb992ca62534c8ebfcb"} Nov 28 17:00:54 crc kubenswrapper[4884]: I1128 17:00:54.418283 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:54 crc kubenswrapper[4884]: I1128 17:00:54.528112 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpksh\" (UniqueName: \"kubernetes.io/projected/c6eebfb8-df42-4119-8ee0-88efd240e6b3-kube-api-access-tpksh\") pod \"c6eebfb8-df42-4119-8ee0-88efd240e6b3\" (UID: \"c6eebfb8-df42-4119-8ee0-88efd240e6b3\") " Nov 28 17:00:54 crc kubenswrapper[4884]: I1128 17:00:54.534240 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6eebfb8-df42-4119-8ee0-88efd240e6b3-kube-api-access-tpksh" (OuterVolumeSpecName: "kube-api-access-tpksh") pod "c6eebfb8-df42-4119-8ee0-88efd240e6b3" (UID: "c6eebfb8-df42-4119-8ee0-88efd240e6b3"). InnerVolumeSpecName "kube-api-access-tpksh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:00:54 crc kubenswrapper[4884]: I1128 17:00:54.629981 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpksh\" (UniqueName: \"kubernetes.io/projected/c6eebfb8-df42-4119-8ee0-88efd240e6b3-kube-api-access-tpksh\") on node \"crc\" DevicePath \"\"" Nov 28 17:00:55 crc kubenswrapper[4884]: I1128 17:00:55.068205 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-npx2k" event={"ID":"c6eebfb8-df42-4119-8ee0-88efd240e6b3","Type":"ContainerDied","Data":"8507cdcb81c45e3b59907f2e93bcffb0219117e05b67edb992ca62534c8ebfcb"} Nov 28 17:00:55 crc kubenswrapper[4884]: I1128 17:00:55.068524 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8507cdcb81c45e3b59907f2e93bcffb0219117e05b67edb992ca62534c8ebfcb" Nov 28 17:00:55 crc kubenswrapper[4884]: I1128 17:00:55.068276 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-npx2k" Nov 28 17:00:55 crc kubenswrapper[4884]: I1128 17:00:55.627270 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:55 crc kubenswrapper[4884]: I1128 17:00:55.627332 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:00:56 crc kubenswrapper[4884]: I1128 17:00:56.697514 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4swq4" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="registry-server" probeResult="failure" output=< Nov 28 17:00:56 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 17:00:56 crc kubenswrapper[4884]: > Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.162416 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405821-56wvr"] Nov 28 17:01:00 crc kubenswrapper[4884]: E1128 17:01:00.163262 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6eebfb8-df42-4119-8ee0-88efd240e6b3" containerName="mariadb-database-create" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.163279 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6eebfb8-df42-4119-8ee0-88efd240e6b3" containerName="mariadb-database-create" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.163531 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6eebfb8-df42-4119-8ee0-88efd240e6b3" containerName="mariadb-database-create" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.164367 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.172648 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405821-56wvr"] Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.347717 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-combined-ca-bundle\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.347812 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-fernet-keys\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.347900 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-config-data\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.347923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk82w\" (UniqueName: \"kubernetes.io/projected/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-kube-api-access-jk82w\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.449699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-config-data\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.449745 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk82w\" (UniqueName: \"kubernetes.io/projected/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-kube-api-access-jk82w\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.449868 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-combined-ca-bundle\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.449921 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-fernet-keys\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.453493 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-s6bz2"] Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.456945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.459906 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-fernet-keys\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.461977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-config-data\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.462537 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-combined-ca-bundle\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.477132 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6bz2"] Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.486566 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk82w\" (UniqueName: \"kubernetes.io/projected/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-kube-api-access-jk82w\") pod \"keystone-cron-29405821-56wvr\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.496204 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.552042 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-utilities\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.552513 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-catalog-content\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.552590 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djlnq\" (UniqueName: \"kubernetes.io/projected/cf300ebe-784d-4cb7-8498-dc75bb07776b-kube-api-access-djlnq\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.654973 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-catalog-content\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.655061 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djlnq\" (UniqueName: \"kubernetes.io/projected/cf300ebe-784d-4cb7-8498-dc75bb07776b-kube-api-access-djlnq\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.655257 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-utilities\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.655977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-utilities\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.656298 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-catalog-content\") pod \"redhat-marketplace-s6bz2\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.708290 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djlnq\" (UniqueName: \"kubernetes.io/projected/cf300ebe-784d-4cb7-8498-dc75bb07776b-kube-api-access-djlnq\") pod \"redhat-marketplace-s6bz2\" 
(UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.918723 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:00 crc kubenswrapper[4884]: I1128 17:01:00.985720 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405821-56wvr"] Nov 28 17:01:00 crc kubenswrapper[4884]: W1128 17:01:00.995251 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad34ca9a_7c6d_45d4_a4f6_3f92d44d247d.slice/crio-8fdeb2c91ca63b02b78a88f040e78a37bfe7e94d57d0b608a6e7d35f084efe85 WatchSource:0}: Error finding container 8fdeb2c91ca63b02b78a88f040e78a37bfe7e94d57d0b608a6e7d35f084efe85: Status 404 returned error can't find the container with id 8fdeb2c91ca63b02b78a88f040e78a37bfe7e94d57d0b608a6e7d35f084efe85 Nov 28 17:01:01 crc kubenswrapper[4884]: I1128 17:01:01.148790 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-56wvr" event={"ID":"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d","Type":"ContainerStarted","Data":"8fdeb2c91ca63b02b78a88f040e78a37bfe7e94d57d0b608a6e7d35f084efe85"} Nov 28 17:01:01 crc kubenswrapper[4884]: I1128 17:01:01.409864 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6bz2"] Nov 28 17:01:01 crc kubenswrapper[4884]: I1128 17:01:01.689624 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.146027 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-1384-account-create-22qvb"] Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.147774 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.149710 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.156700 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-1384-account-create-22qvb"] Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.165450 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-56wvr" event={"ID":"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d","Type":"ContainerStarted","Data":"ede6d8de6bdff9bd477ab2731b66c7399b6f20e93ce63ee6777803b22e175de9"} Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.181940 4884 generic.go:334] "Generic (PLEG): container finished" podID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerID="d050375322ac34211ded1b2b3aba23ebe805dc4dcf8e8b1309e1da0107b17d9b" exitCode=0 Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.181991 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6bz2" event={"ID":"cf300ebe-784d-4cb7-8498-dc75bb07776b","Type":"ContainerDied","Data":"d050375322ac34211ded1b2b3aba23ebe805dc4dcf8e8b1309e1da0107b17d9b"} Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.182018 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6bz2" event={"ID":"cf300ebe-784d-4cb7-8498-dc75bb07776b","Type":"ContainerStarted","Data":"b2166950c2475ffbc492a20b382dabf98041ce5b1df636cef2bfcfa91b9a66e5"} Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.184215 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.192831 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405821-56wvr" podStartSLOduration=2.192810002 podStartE2EDuration="2.192810002s" podCreationTimestamp="2025-11-28 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:01:02.19028615 +0000 UTC m=+6101.753069951" watchObservedRunningTime="2025-11-28 17:01:02.192810002 +0000 UTC m=+6101.755593803" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.287625 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8jj7\" (UniqueName: \"kubernetes.io/projected/52816af2-ca28-4db9-844a-416fb7f0f417-kube-api-access-s8jj7\") pod \"octavia-1384-account-create-22qvb\" (UID: \"52816af2-ca28-4db9-844a-416fb7f0f417\") " pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.390689 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8jj7\" (UniqueName: \"kubernetes.io/projected/52816af2-ca28-4db9-844a-416fb7f0f417-kube-api-access-s8jj7\") pod \"octavia-1384-account-create-22qvb\" (UID: \"52816af2-ca28-4db9-844a-416fb7f0f417\") " pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.415744 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8jj7\" (UniqueName: \"kubernetes.io/projected/52816af2-ca28-4db9-844a-416fb7f0f417-kube-api-access-s8jj7\") pod \"octavia-1384-account-create-22qvb\" (UID: 
\"52816af2-ca28-4db9-844a-416fb7f0f417\") " pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.474379 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.824670 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n9fht"] Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.826823 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.846186 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n9fht"] Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.910946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-catalog-content\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.911037 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-utilities\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.911098 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xv2z\" (UniqueName: \"kubernetes.io/projected/da740737-aea1-4105-8e79-75b7e1de4f77-kube-api-access-2xv2z\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:02 crc kubenswrapper[4884]: I1128 17:01:02.939228 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-1384-account-create-22qvb"] Nov 28 17:01:02 crc kubenswrapper[4884]: W1128 17:01:02.948430 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52816af2_ca28_4db9_844a_416fb7f0f417.slice/crio-49795061ca28cfd2674e5e7448614639456b390716ef73cf5193b0988c8ecb93 WatchSource:0}: Error finding container 49795061ca28cfd2674e5e7448614639456b390716ef73cf5193b0988c8ecb93: Status 404 returned error can't find the container with id 49795061ca28cfd2674e5e7448614639456b390716ef73cf5193b0988c8ecb93 Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.012519 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-catalog-content\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.012592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-utilities\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " 
pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.012633 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xv2z\" (UniqueName: \"kubernetes.io/projected/da740737-aea1-4105-8e79-75b7e1de4f77-kube-api-access-2xv2z\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.012995 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-catalog-content\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.013350 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-utilities\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.033528 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xv2z\" (UniqueName: \"kubernetes.io/projected/da740737-aea1-4105-8e79-75b7e1de4f77-kube-api-access-2xv2z\") pod \"community-operators-n9fht\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.190949 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.196816 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"b85eb04dabfd5877cb92540e0cb996b7befe8c09494a73f2d3f896e81d9d08d7"} Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.205514 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1384-account-create-22qvb" event={"ID":"52816af2-ca28-4db9-844a-416fb7f0f417","Type":"ContainerStarted","Data":"49795061ca28cfd2674e5e7448614639456b390716ef73cf5193b0988c8ecb93"} Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.213352 4884 generic.go:334] "Generic (PLEG): container finished" podID="ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" containerID="ede6d8de6bdff9bd477ab2731b66c7399b6f20e93ce63ee6777803b22e175de9" exitCode=0 Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.213414 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-56wvr" event={"ID":"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d","Type":"ContainerDied","Data":"ede6d8de6bdff9bd477ab2731b66c7399b6f20e93ce63ee6777803b22e175de9"} Nov 28 17:01:03 crc kubenswrapper[4884]: I1128 17:01:03.757016 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n9fht"] Nov 28 17:01:03 crc kubenswrapper[4884]: W1128 17:01:03.763865 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda740737_aea1_4105_8e79_75b7e1de4f77.slice/crio-9c6ed8850f577b33a9d0cc400c9896d9fa108cc89aa7bf815316e41b39f22021 WatchSource:0}: Error 
finding container 9c6ed8850f577b33a9d0cc400c9896d9fa108cc89aa7bf815316e41b39f22021: Status 404 returned error can't find the container with id 9c6ed8850f577b33a9d0cc400c9896d9fa108cc89aa7bf815316e41b39f22021 Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.254376 4884 generic.go:334] "Generic (PLEG): container finished" podID="da740737-aea1-4105-8e79-75b7e1de4f77" containerID="5e427f99e00b46867164839c032bcaa2b6f2c631fe16578146e87e40937b7047" exitCode=0 Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.254446 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9fht" event={"ID":"da740737-aea1-4105-8e79-75b7e1de4f77","Type":"ContainerDied","Data":"5e427f99e00b46867164839c032bcaa2b6f2c631fe16578146e87e40937b7047"} Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.254471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9fht" event={"ID":"da740737-aea1-4105-8e79-75b7e1de4f77","Type":"ContainerStarted","Data":"9c6ed8850f577b33a9d0cc400c9896d9fa108cc89aa7bf815316e41b39f22021"} Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.288360 4884 generic.go:334] "Generic (PLEG): container finished" podID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerID="78b9103522da34e990501d9334f7eda0cc17a543c4ad32ca9f1bbc8cf60beed3" exitCode=0 Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.288441 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6bz2" event={"ID":"cf300ebe-784d-4cb7-8498-dc75bb07776b","Type":"ContainerDied","Data":"78b9103522da34e990501d9334f7eda0cc17a543c4ad32ca9f1bbc8cf60beed3"} Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.293254 4884 generic.go:334] "Generic (PLEG): container finished" podID="52816af2-ca28-4db9-844a-416fb7f0f417" containerID="ee54ec0f35fdf91f4035f04356d67ae210c1c2eec9dd31e000ad820c9fdd637e" exitCode=0 Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.293354 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1384-account-create-22qvb" event={"ID":"52816af2-ca28-4db9-844a-416fb7f0f417","Type":"ContainerDied","Data":"ee54ec0f35fdf91f4035f04356d67ae210c1c2eec9dd31e000ad820c9fdd637e"} Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.678410 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.762971 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jk82w\" (UniqueName: \"kubernetes.io/projected/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-kube-api-access-jk82w\") pod \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.763194 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-config-data\") pod \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.763306 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-fernet-keys\") pod \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.763425 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-combined-ca-bundle\") pod \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\" (UID: \"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d\") " Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.769669 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" (UID: "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.769994 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-kube-api-access-jk82w" (OuterVolumeSpecName: "kube-api-access-jk82w") pod "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" (UID: "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d"). InnerVolumeSpecName "kube-api-access-jk82w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.810166 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" (UID: "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.856878 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-config-data" (OuterVolumeSpecName: "config-data") pod "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" (UID: "ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.866435 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.866480 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.866494 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jk82w\" (UniqueName: \"kubernetes.io/projected/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-kube-api-access-jk82w\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:04 crc kubenswrapper[4884]: I1128 17:01:04.866508 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.324476 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6bz2" event={"ID":"cf300ebe-784d-4cb7-8498-dc75bb07776b","Type":"ContainerStarted","Data":"a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3"} Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.328334 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405821-56wvr" event={"ID":"ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d","Type":"ContainerDied","Data":"8fdeb2c91ca63b02b78a88f040e78a37bfe7e94d57d0b608a6e7d35f084efe85"} Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.328379 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8fdeb2c91ca63b02b78a88f040e78a37bfe7e94d57d0b608a6e7d35f084efe85" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.328396 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405821-56wvr" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.343024 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s6bz2" podStartSLOduration=2.669291282 podStartE2EDuration="5.343004311s" podCreationTimestamp="2025-11-28 17:01:00 +0000 UTC" firstStartedPulling="2025-11-28 17:01:02.183910954 +0000 UTC m=+6101.746694755" lastFinishedPulling="2025-11-28 17:01:04.857623983 +0000 UTC m=+6104.420407784" observedRunningTime="2025-11-28 17:01:05.34051853 +0000 UTC m=+6104.903302341" watchObservedRunningTime="2025-11-28 17:01:05.343004311 +0000 UTC m=+6104.905788112" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.682962 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.741441 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.777760 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.883347 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8jj7\" (UniqueName: \"kubernetes.io/projected/52816af2-ca28-4db9-844a-416fb7f0f417-kube-api-access-s8jj7\") pod \"52816af2-ca28-4db9-844a-416fb7f0f417\" (UID: \"52816af2-ca28-4db9-844a-416fb7f0f417\") " Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.899728 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52816af2-ca28-4db9-844a-416fb7f0f417-kube-api-access-s8jj7" (OuterVolumeSpecName: "kube-api-access-s8jj7") pod "52816af2-ca28-4db9-844a-416fb7f0f417" (UID: "52816af2-ca28-4db9-844a-416fb7f0f417"). InnerVolumeSpecName "kube-api-access-s8jj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:05 crc kubenswrapper[4884]: I1128 17:01:05.986272 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8jj7\" (UniqueName: \"kubernetes.io/projected/52816af2-ca28-4db9-844a-416fb7f0f417-kube-api-access-s8jj7\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.338060 4884 generic.go:334] "Generic (PLEG): container finished" podID="da740737-aea1-4105-8e79-75b7e1de4f77" containerID="4c7fc59673947f4a0910ece47148e7111e5f630287ad8b4352024fce43db2997" exitCode=0 Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.338131 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9fht" event={"ID":"da740737-aea1-4105-8e79-75b7e1de4f77","Type":"ContainerDied","Data":"4c7fc59673947f4a0910ece47148e7111e5f630287ad8b4352024fce43db2997"} Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.342481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1384-account-create-22qvb" event={"ID":"52816af2-ca28-4db9-844a-416fb7f0f417","Type":"ContainerDied","Data":"49795061ca28cfd2674e5e7448614639456b390716ef73cf5193b0988c8ecb93"} Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.342616 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-1384-account-create-22qvb" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.342676 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49795061ca28cfd2674e5e7448614639456b390716ef73cf5193b0988c8ecb93" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.451786 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-lcdfn" podUID="7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa" containerName="ovn-controller" probeResult="failure" output=< Nov 28 17:01:06 crc kubenswrapper[4884]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 17:01:06 crc kubenswrapper[4884]: > Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.498730 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.528651 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-xkdfq" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.656128 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-lcdfn-config-t94jd"] Nov 28 17:01:06 crc kubenswrapper[4884]: E1128 17:01:06.656640 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" containerName="keystone-cron" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.656665 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" containerName="keystone-cron" Nov 28 17:01:06 crc kubenswrapper[4884]: E1128 17:01:06.656685 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52816af2-ca28-4db9-844a-416fb7f0f417" containerName="mariadb-account-create" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.656693 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="52816af2-ca28-4db9-844a-416fb7f0f417" containerName="mariadb-account-create" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.656966 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="52816af2-ca28-4db9-844a-416fb7f0f417" containerName="mariadb-account-create" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.656998 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d" containerName="keystone-cron" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.657773 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.663752 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.675315 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-lcdfn-config-t94jd"] Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.802813 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-scripts\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.803300 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz5kg\" (UniqueName: \"kubernetes.io/projected/5427de0b-0baf-4d28-89db-b3d67958befb-kube-api-access-lz5kg\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.803336 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-additional-scripts\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.803804 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-log-ovn\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.804075 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.804342 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run-ovn\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.905968 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-log-ovn\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906048 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run\") pod 
\"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906077 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run-ovn\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906127 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-scripts\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz5kg\" (UniqueName: \"kubernetes.io/projected/5427de0b-0baf-4d28-89db-b3d67958befb-kube-api-access-lz5kg\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906178 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-additional-scripts\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906316 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906323 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-log-ovn\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run-ovn\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.906885 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-additional-scripts\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.908025 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-scripts\") pod 
\"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.930005 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz5kg\" (UniqueName: \"kubernetes.io/projected/5427de0b-0baf-4d28-89db-b3d67958befb-kube-api-access-lz5kg\") pod \"ovn-controller-lcdfn-config-t94jd\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:06 crc kubenswrapper[4884]: I1128 17:01:06.981714 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:07 crc kubenswrapper[4884]: I1128 17:01:07.353481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9fht" event={"ID":"da740737-aea1-4105-8e79-75b7e1de4f77","Type":"ContainerStarted","Data":"e31f685a4fb2cfb3c914fdadabc683427ec9af8f778adebec2ca2e3931961907"} Nov 28 17:01:07 crc kubenswrapper[4884]: I1128 17:01:07.387068 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n9fht" podStartSLOduration=2.8451266349999997 podStartE2EDuration="5.38704531s" podCreationTimestamp="2025-11-28 17:01:02 +0000 UTC" firstStartedPulling="2025-11-28 17:01:04.268722681 +0000 UTC m=+6103.831506482" lastFinishedPulling="2025-11-28 17:01:06.810641356 +0000 UTC m=+6106.373425157" observedRunningTime="2025-11-28 17:01:07.380718526 +0000 UTC m=+6106.943502337" watchObservedRunningTime="2025-11-28 17:01:07.38704531 +0000 UTC m=+6106.949829111" Nov 28 17:01:07 crc kubenswrapper[4884]: I1128 17:01:07.437057 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-lcdfn-config-t94jd"] Nov 28 17:01:07 crc kubenswrapper[4884]: W1128 17:01:07.438071 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5427de0b_0baf_4d28_89db_b3d67958befb.slice/crio-8cb29384347f9395bf991f80aaa30f8033817c7d01265635ab46c6ce009e6b43 WatchSource:0}: Error finding container 8cb29384347f9395bf991f80aaa30f8033817c7d01265635ab46c6ce009e6b43: Status 404 returned error can't find the container with id 8cb29384347f9395bf991f80aaa30f8033817c7d01265635ab46c6ce009e6b43 Nov 28 17:01:08 crc kubenswrapper[4884]: I1128 17:01:08.363072 4884 generic.go:334] "Generic (PLEG): container finished" podID="5427de0b-0baf-4d28-89db-b3d67958befb" containerID="ab2bfdb91e56b5a4239bed2b448579206e305374756eee2dd089ba730a499b05" exitCode=0 Nov 28 17:01:08 crc kubenswrapper[4884]: I1128 17:01:08.363238 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-lcdfn-config-t94jd" event={"ID":"5427de0b-0baf-4d28-89db-b3d67958befb","Type":"ContainerDied","Data":"ab2bfdb91e56b5a4239bed2b448579206e305374756eee2dd089ba730a499b05"} Nov 28 17:01:08 crc kubenswrapper[4884]: I1128 17:01:08.363698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-lcdfn-config-t94jd" event={"ID":"5427de0b-0baf-4d28-89db-b3d67958befb","Type":"ContainerStarted","Data":"8cb29384347f9395bf991f80aaa30f8033817c7d01265635ab46c6ce009e6b43"} Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.034958 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4swq4"] Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.035258 4884 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4swq4" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="registry-server" containerID="cri-o://85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239" gracePeriod=2 Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.178481 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-777656c4c8-725l2"] Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.180642 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.183159 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.183470 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.185295 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-j8x6q" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.215144 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-777656c4c8-725l2"] Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.249438 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-config-data\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.249489 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/232825e9-21d7-4a6b-86ac-b9f32f33d783-octavia-run\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.249559 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-scripts\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.249635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/232825e9-21d7-4a6b-86ac-b9f32f33d783-config-data-merged\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.249664 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-combined-ca-bundle\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.351658 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-config-data\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.351927 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/232825e9-21d7-4a6b-86ac-b9f32f33d783-octavia-run\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.352019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-scripts\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.352319 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/232825e9-21d7-4a6b-86ac-b9f32f33d783-octavia-run\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.352822 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/232825e9-21d7-4a6b-86ac-b9f32f33d783-config-data-merged\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.352857 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-combined-ca-bundle\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.353136 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/232825e9-21d7-4a6b-86ac-b9f32f33d783-config-data-merged\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.358102 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-config-data\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.364298 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-combined-ca-bundle\") pod \"octavia-api-777656c4c8-725l2\" (UID: \"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.380198 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/232825e9-21d7-4a6b-86ac-b9f32f33d783-scripts\") pod \"octavia-api-777656c4c8-725l2\" (UID: 
\"232825e9-21d7-4a6b-86ac-b9f32f33d783\") " pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.506688 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.655134 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.766078 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-additional-scripts\") pod \"5427de0b-0baf-4d28-89db-b3d67958befb\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.766218 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run\") pod \"5427de0b-0baf-4d28-89db-b3d67958befb\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.766269 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-log-ovn\") pod \"5427de0b-0baf-4d28-89db-b3d67958befb\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.766394 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run-ovn\") pod \"5427de0b-0baf-4d28-89db-b3d67958befb\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.766444 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz5kg\" (UniqueName: \"kubernetes.io/projected/5427de0b-0baf-4d28-89db-b3d67958befb-kube-api-access-lz5kg\") pod \"5427de0b-0baf-4d28-89db-b3d67958befb\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.766473 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-scripts\") pod \"5427de0b-0baf-4d28-89db-b3d67958befb\" (UID: \"5427de0b-0baf-4d28-89db-b3d67958befb\") " Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.768571 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5427de0b-0baf-4d28-89db-b3d67958befb" (UID: "5427de0b-0baf-4d28-89db-b3d67958befb"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.768619 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run" (OuterVolumeSpecName: "var-run") pod "5427de0b-0baf-4d28-89db-b3d67958befb" (UID: "5427de0b-0baf-4d28-89db-b3d67958befb"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.768643 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5427de0b-0baf-4d28-89db-b3d67958befb" (UID: "5427de0b-0baf-4d28-89db-b3d67958befb"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.768665 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5427de0b-0baf-4d28-89db-b3d67958befb" (UID: "5427de0b-0baf-4d28-89db-b3d67958befb"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.771975 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-scripts" (OuterVolumeSpecName: "scripts") pod "5427de0b-0baf-4d28-89db-b3d67958befb" (UID: "5427de0b-0baf-4d28-89db-b3d67958befb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.776274 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5427de0b-0baf-4d28-89db-b3d67958befb-kube-api-access-lz5kg" (OuterVolumeSpecName: "kube-api-access-lz5kg") pod "5427de0b-0baf-4d28-89db-b3d67958befb" (UID: "5427de0b-0baf-4d28-89db-b3d67958befb"). InnerVolumeSpecName "kube-api-access-lz5kg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.869260 4884 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.869299 4884 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.869311 4884 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.869322 4884 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5427de0b-0baf-4d28-89db-b3d67958befb-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.869333 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz5kg\" (UniqueName: \"kubernetes.io/projected/5427de0b-0baf-4d28-89db-b3d67958befb-kube-api-access-lz5kg\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:09 crc kubenswrapper[4884]: I1128 17:01:09.869344 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5427de0b-0baf-4d28-89db-b3d67958befb-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.067965 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/octavia-api-777656c4c8-725l2"] Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.155585 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.277293 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-utilities\") pod \"20b94db2-a107-4fed-b403-07a34cb1b89c\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.277744 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wsrk\" (UniqueName: \"kubernetes.io/projected/20b94db2-a107-4fed-b403-07a34cb1b89c-kube-api-access-4wsrk\") pod \"20b94db2-a107-4fed-b403-07a34cb1b89c\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.277951 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-catalog-content\") pod \"20b94db2-a107-4fed-b403-07a34cb1b89c\" (UID: \"20b94db2-a107-4fed-b403-07a34cb1b89c\") " Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.278313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-utilities" (OuterVolumeSpecName: "utilities") pod "20b94db2-a107-4fed-b403-07a34cb1b89c" (UID: "20b94db2-a107-4fed-b403-07a34cb1b89c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.278635 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.281310 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b94db2-a107-4fed-b403-07a34cb1b89c-kube-api-access-4wsrk" (OuterVolumeSpecName: "kube-api-access-4wsrk") pod "20b94db2-a107-4fed-b403-07a34cb1b89c" (UID: "20b94db2-a107-4fed-b403-07a34cb1b89c"). InnerVolumeSpecName "kube-api-access-4wsrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.380123 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wsrk\" (UniqueName: \"kubernetes.io/projected/20b94db2-a107-4fed-b403-07a34cb1b89c-kube-api-access-4wsrk\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.384013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20b94db2-a107-4fed-b403-07a34cb1b89c" (UID: "20b94db2-a107-4fed-b403-07a34cb1b89c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.386896 4884 generic.go:334] "Generic (PLEG): container finished" podID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerID="85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239" exitCode=0 Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.386969 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4swq4" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.386974 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerDied","Data":"85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239"} Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.387126 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4swq4" event={"ID":"20b94db2-a107-4fed-b403-07a34cb1b89c","Type":"ContainerDied","Data":"6b9cf3c42dd0cfdc7b30414f583ba5bf3f48ba1683999b16b43d0395268f33d1"} Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.387159 4884 scope.go:117] "RemoveContainer" containerID="85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.389632 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-777656c4c8-725l2" event={"ID":"232825e9-21d7-4a6b-86ac-b9f32f33d783","Type":"ContainerStarted","Data":"24fce1d0bcbb1155db09fef55dcd64fb84ccd9495ab521063abe06d59c70bb4b"} Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.392352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-lcdfn-config-t94jd" event={"ID":"5427de0b-0baf-4d28-89db-b3d67958befb","Type":"ContainerDied","Data":"8cb29384347f9395bf991f80aaa30f8033817c7d01265635ab46c6ce009e6b43"} Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.392379 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8cb29384347f9395bf991f80aaa30f8033817c7d01265635ab46c6ce009e6b43" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.392436 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-lcdfn-config-t94jd" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.427630 4884 scope.go:117] "RemoveContainer" containerID="b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.446118 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4swq4"] Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.451063 4884 scope.go:117] "RemoveContainer" containerID="d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.456467 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4swq4"] Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.482383 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20b94db2-a107-4fed-b403-07a34cb1b89c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.484911 4884 scope.go:117] "RemoveContainer" containerID="85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239" Nov 28 17:01:10 crc kubenswrapper[4884]: E1128 17:01:10.485319 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239\": container with ID starting with 85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239 not found: ID does not exist" containerID="85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.485354 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239"} err="failed to get container status \"85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239\": rpc error: code = NotFound desc = could not find container \"85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239\": container with ID starting with 85ee82c5ee0598e7a85074b011176d12969b21becf8011476ed619d7014e3239 not found: ID does not exist" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.485376 4884 scope.go:117] "RemoveContainer" containerID="b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f" Nov 28 17:01:10 crc kubenswrapper[4884]: E1128 17:01:10.485857 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f\": container with ID starting with b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f not found: ID does not exist" containerID="b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.485887 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f"} err="failed to get container status \"b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f\": rpc error: code = NotFound desc = could not find container \"b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f\": container with ID starting with b768aa263a1198f9128df190e886cfe18155d06fa9930dcd572f24d83ceaf09f not found: ID does not exist" Nov 28 17:01:10 crc 
kubenswrapper[4884]: I1128 17:01:10.485901 4884 scope.go:117] "RemoveContainer" containerID="d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed" Nov 28 17:01:10 crc kubenswrapper[4884]: E1128 17:01:10.486231 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed\": container with ID starting with d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed not found: ID does not exist" containerID="d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.486288 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed"} err="failed to get container status \"d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed\": rpc error: code = NotFound desc = could not find container \"d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed\": container with ID starting with d678e821f53679f48deca3195bba37220421dc2bed399a427729c7d89a856eed not found: ID does not exist" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.701611 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" path="/var/lib/kubelet/pods/20b94db2-a107-4fed-b403-07a34cb1b89c/volumes" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.751780 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-lcdfn-config-t94jd"] Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.764051 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-lcdfn-config-t94jd"] Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.919256 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.920422 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:10 crc kubenswrapper[4884]: I1128 17:01:10.964932 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:11 crc kubenswrapper[4884]: I1128 17:01:11.470385 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:11 crc kubenswrapper[4884]: I1128 17:01:11.494943 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-lcdfn" Nov 28 17:01:12 crc kubenswrapper[4884]: I1128 17:01:12.700830 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5427de0b-0baf-4d28-89db-b3d67958befb" path="/var/lib/kubelet/pods/5427de0b-0baf-4d28-89db-b3d67958befb/volumes" Nov 28 17:01:13 crc kubenswrapper[4884]: I1128 17:01:13.191819 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:13 crc kubenswrapper[4884]: I1128 17:01:13.191920 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:13 crc kubenswrapper[4884]: I1128 17:01:13.218936 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6bz2"] Nov 28 17:01:13 
crc kubenswrapper[4884]: I1128 17:01:13.259353 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:13 crc kubenswrapper[4884]: I1128 17:01:13.494139 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:14 crc kubenswrapper[4884]: I1128 17:01:14.443319 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s6bz2" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="registry-server" containerID="cri-o://a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3" gracePeriod=2 Nov 28 17:01:14 crc kubenswrapper[4884]: E1128 17:01:14.665364 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf300ebe_784d_4cb7_8498_dc75bb07776b.slice/crio-conmon-a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf300ebe_784d_4cb7_8498_dc75bb07776b.slice/crio-a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3.scope\": RecentStats: unable to find data in memory cache]" Nov 28 17:01:15 crc kubenswrapper[4884]: I1128 17:01:15.455139 4884 generic.go:334] "Generic (PLEG): container finished" podID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerID="a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3" exitCode=0 Nov 28 17:01:15 crc kubenswrapper[4884]: I1128 17:01:15.455188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6bz2" event={"ID":"cf300ebe-784d-4cb7-8498-dc75bb07776b","Type":"ContainerDied","Data":"a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3"} Nov 28 17:01:15 crc kubenswrapper[4884]: I1128 17:01:15.619794 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n9fht"] Nov 28 17:01:15 crc kubenswrapper[4884]: I1128 17:01:15.620222 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n9fht" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="registry-server" containerID="cri-o://e31f685a4fb2cfb3c914fdadabc683427ec9af8f778adebec2ca2e3931961907" gracePeriod=2 Nov 28 17:01:16 crc kubenswrapper[4884]: I1128 17:01:16.467898 4884 generic.go:334] "Generic (PLEG): container finished" podID="da740737-aea1-4105-8e79-75b7e1de4f77" containerID="e31f685a4fb2cfb3c914fdadabc683427ec9af8f778adebec2ca2e3931961907" exitCode=0 Nov 28 17:01:16 crc kubenswrapper[4884]: I1128 17:01:16.467952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9fht" event={"ID":"da740737-aea1-4105-8e79-75b7e1de4f77","Type":"ContainerDied","Data":"e31f685a4fb2cfb3c914fdadabc683427ec9af8f778adebec2ca2e3931961907"} Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.804198 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.812801 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.861869 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-catalog-content\") pod \"cf300ebe-784d-4cb7-8498-dc75bb07776b\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.861945 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-utilities\") pod \"da740737-aea1-4105-8e79-75b7e1de4f77\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.862021 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djlnq\" (UniqueName: \"kubernetes.io/projected/cf300ebe-784d-4cb7-8498-dc75bb07776b-kube-api-access-djlnq\") pod \"cf300ebe-784d-4cb7-8498-dc75bb07776b\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.862060 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xv2z\" (UniqueName: \"kubernetes.io/projected/da740737-aea1-4105-8e79-75b7e1de4f77-kube-api-access-2xv2z\") pod \"da740737-aea1-4105-8e79-75b7e1de4f77\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.862163 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-utilities\") pod \"cf300ebe-784d-4cb7-8498-dc75bb07776b\" (UID: \"cf300ebe-784d-4cb7-8498-dc75bb07776b\") " Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.862201 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-catalog-content\") pod \"da740737-aea1-4105-8e79-75b7e1de4f77\" (UID: \"da740737-aea1-4105-8e79-75b7e1de4f77\") " Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.862898 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-utilities" (OuterVolumeSpecName: "utilities") pod "cf300ebe-784d-4cb7-8498-dc75bb07776b" (UID: "cf300ebe-784d-4cb7-8498-dc75bb07776b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.862961 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-utilities" (OuterVolumeSpecName: "utilities") pod "da740737-aea1-4105-8e79-75b7e1de4f77" (UID: "da740737-aea1-4105-8e79-75b7e1de4f77"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.868656 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf300ebe-784d-4cb7-8498-dc75bb07776b-kube-api-access-djlnq" (OuterVolumeSpecName: "kube-api-access-djlnq") pod "cf300ebe-784d-4cb7-8498-dc75bb07776b" (UID: "cf300ebe-784d-4cb7-8498-dc75bb07776b"). InnerVolumeSpecName "kube-api-access-djlnq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.870234 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da740737-aea1-4105-8e79-75b7e1de4f77-kube-api-access-2xv2z" (OuterVolumeSpecName: "kube-api-access-2xv2z") pod "da740737-aea1-4105-8e79-75b7e1de4f77" (UID: "da740737-aea1-4105-8e79-75b7e1de4f77"). InnerVolumeSpecName "kube-api-access-2xv2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.881594 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf300ebe-784d-4cb7-8498-dc75bb07776b" (UID: "cf300ebe-784d-4cb7-8498-dc75bb07776b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.907561 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da740737-aea1-4105-8e79-75b7e1de4f77" (UID: "da740737-aea1-4105-8e79-75b7e1de4f77"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.964054 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xv2z\" (UniqueName: \"kubernetes.io/projected/da740737-aea1-4105-8e79-75b7e1de4f77-kube-api-access-2xv2z\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.964106 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.964121 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.964132 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf300ebe-784d-4cb7-8498-dc75bb07776b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.964143 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da740737-aea1-4105-8e79-75b7e1de4f77-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:19 crc kubenswrapper[4884]: I1128 17:01:19.964155 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djlnq\" (UniqueName: \"kubernetes.io/projected/cf300ebe-784d-4cb7-8498-dc75bb07776b-kube-api-access-djlnq\") on node \"crc\" DevicePath \"\"" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.511507 4884 generic.go:334] "Generic (PLEG): container finished" podID="232825e9-21d7-4a6b-86ac-b9f32f33d783" containerID="1e0332846fc6df7aefe42ec37f1397a630f4a35af2f82b5a1cf0fc30d06c4689" exitCode=0 Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.511591 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-777656c4c8-725l2" 
event={"ID":"232825e9-21d7-4a6b-86ac-b9f32f33d783","Type":"ContainerDied","Data":"1e0332846fc6df7aefe42ec37f1397a630f4a35af2f82b5a1cf0fc30d06c4689"} Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.515697 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s6bz2" event={"ID":"cf300ebe-784d-4cb7-8498-dc75bb07776b","Type":"ContainerDied","Data":"b2166950c2475ffbc492a20b382dabf98041ce5b1df636cef2bfcfa91b9a66e5"} Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.515724 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s6bz2" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.515752 4884 scope.go:117] "RemoveContainer" containerID="a5966cfecd4695ac78aaea3e1b805d559b5d98d85a492facbc0463b07193adf3" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.520251 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n9fht" event={"ID":"da740737-aea1-4105-8e79-75b7e1de4f77","Type":"ContainerDied","Data":"9c6ed8850f577b33a9d0cc400c9896d9fa108cc89aa7bf815316e41b39f22021"} Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.520307 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n9fht" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.682978 4884 scope.go:117] "RemoveContainer" containerID="78b9103522da34e990501d9334f7eda0cc17a543c4ad32ca9f1bbc8cf60beed3" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.712862 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6bz2"] Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.727228 4884 scope.go:117] "RemoveContainer" containerID="d050375322ac34211ded1b2b3aba23ebe805dc4dcf8e8b1309e1da0107b17d9b" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.731641 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s6bz2"] Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.750652 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n9fht"] Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.763019 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n9fht"] Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.773739 4884 scope.go:117] "RemoveContainer" containerID="e31f685a4fb2cfb3c914fdadabc683427ec9af8f778adebec2ca2e3931961907" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.799011 4884 scope.go:117] "RemoveContainer" containerID="4c7fc59673947f4a0910ece47148e7111e5f630287ad8b4352024fce43db2997" Nov 28 17:01:20 crc kubenswrapper[4884]: I1128 17:01:20.869308 4884 scope.go:117] "RemoveContainer" containerID="5e427f99e00b46867164839c032bcaa2b6f2c631fe16578146e87e40937b7047" Nov 28 17:01:21 crc kubenswrapper[4884]: I1128 17:01:21.535889 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-777656c4c8-725l2" event={"ID":"232825e9-21d7-4a6b-86ac-b9f32f33d783","Type":"ContainerStarted","Data":"e20a2436bdc1d53050b66c4402d93a420780b154ab298a071efce3548a9c4a17"} Nov 28 17:01:21 crc kubenswrapper[4884]: I1128 17:01:21.536342 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-777656c4c8-725l2" 
event={"ID":"232825e9-21d7-4a6b-86ac-b9f32f33d783","Type":"ContainerStarted","Data":"dde277fa5f261930dcde7d68bcf972e320c1556c75ea9ba0e4847d352369fe23"} Nov 28 17:01:21 crc kubenswrapper[4884]: I1128 17:01:21.538085 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:21 crc kubenswrapper[4884]: I1128 17:01:21.538650 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-777656c4c8-725l2" Nov 28 17:01:21 crc kubenswrapper[4884]: I1128 17:01:21.568235 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-777656c4c8-725l2" podStartSLOduration=3.151732119 podStartE2EDuration="12.568215341s" podCreationTimestamp="2025-11-28 17:01:09 +0000 UTC" firstStartedPulling="2025-11-28 17:01:10.058972346 +0000 UTC m=+6109.621756147" lastFinishedPulling="2025-11-28 17:01:19.475455568 +0000 UTC m=+6119.038239369" observedRunningTime="2025-11-28 17:01:21.558490824 +0000 UTC m=+6121.121274645" watchObservedRunningTime="2025-11-28 17:01:21.568215341 +0000 UTC m=+6121.130999152" Nov 28 17:01:22 crc kubenswrapper[4884]: I1128 17:01:22.708859 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" path="/var/lib/kubelet/pods/cf300ebe-784d-4cb7-8498-dc75bb07776b/volumes" Nov 28 17:01:22 crc kubenswrapper[4884]: I1128 17:01:22.710477 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" path="/var/lib/kubelet/pods/da740737-aea1-4105-8e79-75b7e1de4f77/volumes" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.450015 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-4r269"] Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451224 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="extract-utilities" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451244 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="extract-utilities" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451257 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="extract-content" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451267 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="extract-content" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451281 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="extract-utilities" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451289 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="extract-utilities" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451309 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5427de0b-0baf-4d28-89db-b3d67958befb" containerName="ovn-config" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451316 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5427de0b-0baf-4d28-89db-b3d67958befb" containerName="ovn-config" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451325 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" 
containerName="extract-content" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451333 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="extract-content" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451348 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="extract-content" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451355 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="extract-content" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451369 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451376 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451402 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="extract-utilities" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451410 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="extract-utilities" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451429 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451437 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: E1128 17:01:32.451453 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451460 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451692 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="20b94db2-a107-4fed-b403-07a34cb1b89c" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451712 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf300ebe-784d-4cb7-8498-dc75bb07776b" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451725 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5427de0b-0baf-4d28-89db-b3d67958befb" containerName="ovn-config" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.451738 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="da740737-aea1-4105-8e79-75b7e1de4f77" containerName="registry-server" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.453020 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.455326 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.455670 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.456647 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.505481 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-4r269"] Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.512951 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-scripts\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.513291 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-hm-ports\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.513432 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-config-data\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.513534 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-config-data-merged\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.614884 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-hm-ports\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.614974 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-config-data\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.615007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-config-data-merged\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.615070 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-scripts\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.616659 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-config-data-merged\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.617347 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-hm-ports\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.625277 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-scripts\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.634970 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58718a63-4d7e-4e0b-bd7f-140dfbdf18a5-config-data\") pod \"octavia-rsyslog-4r269\" (UID: \"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5\") " pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:32 crc kubenswrapper[4884]: I1128 17:01:32.827901 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-4r269" Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.180515 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-9v4mf"] Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.183496 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.185498 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.200447 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-9v4mf"]
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.224415 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-httpd-config\") pod \"octavia-image-upload-59f8cff499-9v4mf\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") " pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.224847 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-amphora-image\") pod \"octavia-image-upload-59f8cff499-9v4mf\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") " pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.330509 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-httpd-config\") pod \"octavia-image-upload-59f8cff499-9v4mf\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") " pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.330988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-amphora-image\") pod \"octavia-image-upload-59f8cff499-9v4mf\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") " pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.331487 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-amphora-image\") pod \"octavia-image-upload-59f8cff499-9v4mf\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") " pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.346372 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-httpd-config\") pod \"octavia-image-upload-59f8cff499-9v4mf\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") " pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.390977 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-4r269"]
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.512430 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:01:33 crc kubenswrapper[4884]: I1128 17:01:33.672927 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-4r269" event={"ID":"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5","Type":"ContainerStarted","Data":"6084978dafe105c614a5b7f54f2a98cbe31bf5998a8d9e236c6cf08b2949b69e"}
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.018828 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-9v4mf"]
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.681432 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" event={"ID":"ff8d1590-5bd0-4ed2-8257-2bccaacc045c","Type":"ContainerStarted","Data":"fbd18dd740bf8b053be7812e39f2490bf011fa3c9acf5a3988f4a5f843b8817b"}
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.814815 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-9wv6z"]
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.817656 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.819857 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.844666 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-9wv6z"]
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.876528 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-scripts\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.876593 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.876627 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data-merged\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.876905 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-combined-ca-bundle\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.979185 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-scripts\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.979559 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.979601 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data-merged\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.979654 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-combined-ca-bundle\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.981401 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data-merged\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.985645 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.987442 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-scripts\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:34 crc kubenswrapper[4884]: I1128 17:01:34.994705 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-combined-ca-bundle\") pod \"octavia-db-sync-9wv6z\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") " pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:35 crc kubenswrapper[4884]: I1128 17:01:35.144370 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:35 crc kubenswrapper[4884]: I1128 17:01:35.605453 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-9wv6z"]
Nov 28 17:01:35 crc kubenswrapper[4884]: I1128 17:01:35.695695 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-4r269" event={"ID":"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5","Type":"ContainerStarted","Data":"06dad0326cec20c9d591d625a555766a5fb72ebf5e634023fbe421b66fdd4136"}
Nov 28 17:01:36 crc kubenswrapper[4884]: I1128 17:01:36.708852 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-9wv6z" event={"ID":"443a471a-5e1a-45a6-9af3-ee911ce78535","Type":"ContainerStarted","Data":"d2e771c67087c4ad1a4706f46920440cddd2971a0df37d2b5fc761d73c124d03"}
Nov 28 17:01:38 crc kubenswrapper[4884]: I1128 17:01:38.733875 4884 generic.go:334] "Generic (PLEG): container finished" podID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerID="86437da1d838bd138e80d2662433b4a857f7bebc565f2004360e75445f3ebf77" exitCode=0
Nov 28 17:01:38 crc kubenswrapper[4884]: I1128 17:01:38.733985 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-9wv6z" event={"ID":"443a471a-5e1a-45a6-9af3-ee911ce78535","Type":"ContainerDied","Data":"86437da1d838bd138e80d2662433b4a857f7bebc565f2004360e75445f3ebf77"}
Nov 28 17:01:38 crc kubenswrapper[4884]: I1128 17:01:38.739485 4884 generic.go:334] "Generic (PLEG): container finished" podID="58718a63-4d7e-4e0b-bd7f-140dfbdf18a5" containerID="06dad0326cec20c9d591d625a555766a5fb72ebf5e634023fbe421b66fdd4136" exitCode=0
Nov 28 17:01:38 crc kubenswrapper[4884]: I1128 17:01:38.739520 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-4r269" event={"ID":"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5","Type":"ContainerDied","Data":"06dad0326cec20c9d591d625a555766a5fb72ebf5e634023fbe421b66fdd4136"}
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.040330 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-777656c4c8-725l2"
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.168102 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-777656c4c8-725l2"
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.838255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-9wv6z" event={"ID":"443a471a-5e1a-45a6-9af3-ee911ce78535","Type":"ContainerStarted","Data":"8ca9308e05b47470451f6c0a75405c85aa1ebc309b5cec559c23786da88a7f06"}
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.854886 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-4r269" event={"ID":"58718a63-4d7e-4e0b-bd7f-140dfbdf18a5","Type":"ContainerStarted","Data":"33252803010a6696cf9a30a5582254daf056ec1f948e2a1778d59915198d8bf3"}
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.856355 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-4r269"
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.860467 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-9wv6z" podStartSLOduration=10.860449519 podStartE2EDuration="10.860449519s" podCreationTimestamp="2025-11-28 17:01:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:01:44.856612015 +0000 UTC m=+6144.419395836" watchObservedRunningTime="2025-11-28 17:01:44.860449519 +0000 UTC m=+6144.423233330"
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.861807 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" event={"ID":"ff8d1590-5bd0-4ed2-8257-2bccaacc045c","Type":"ContainerStarted","Data":"3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3"}
Nov 28 17:01:44 crc kubenswrapper[4884]: I1128 17:01:44.880302 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-4r269" podStartSLOduration=2.317635285 podStartE2EDuration="12.880278995s" podCreationTimestamp="2025-11-28 17:01:32 +0000 UTC" firstStartedPulling="2025-11-28 17:01:33.395712514 +0000 UTC m=+6132.958496305" lastFinishedPulling="2025-11-28 17:01:43.958356214 +0000 UTC m=+6143.521140015" observedRunningTime="2025-11-28 17:01:44.877951347 +0000 UTC m=+6144.440735158" watchObservedRunningTime="2025-11-28 17:01:44.880278995 +0000 UTC m=+6144.443062796"
Nov 28 17:01:48 crc kubenswrapper[4884]: I1128 17:01:48.932965 4884 generic.go:334] "Generic (PLEG): container finished" podID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerID="3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3" exitCode=0
Nov 28 17:01:48 crc kubenswrapper[4884]: I1128 17:01:48.933145 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" event={"ID":"ff8d1590-5bd0-4ed2-8257-2bccaacc045c","Type":"ContainerDied","Data":"3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3"}
Nov 28 17:01:50 crc kubenswrapper[4884]: I1128 17:01:50.968686 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" event={"ID":"ff8d1590-5bd0-4ed2-8257-2bccaacc045c","Type":"ContainerStarted","Data":"1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9"}
Nov 28 17:01:50 crc kubenswrapper[4884]: I1128 17:01:50.970920 4884 generic.go:334] "Generic (PLEG): container finished" podID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerID="8ca9308e05b47470451f6c0a75405c85aa1ebc309b5cec559c23786da88a7f06" exitCode=0
Nov 28 17:01:50 crc kubenswrapper[4884]: I1128 17:01:50.970974 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-9wv6z" event={"ID":"443a471a-5e1a-45a6-9af3-ee911ce78535","Type":"ContainerDied","Data":"8ca9308e05b47470451f6c0a75405c85aa1ebc309b5cec559c23786da88a7f06"}
Nov 28 17:01:50 crc kubenswrapper[4884]: I1128 17:01:50.996993 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" podStartSLOduration=1.6335116140000001 podStartE2EDuration="17.996968426s" podCreationTimestamp="2025-11-28 17:01:33 +0000 UTC" firstStartedPulling="2025-11-28 17:01:34.022455481 +0000 UTC m=+6133.585239272" lastFinishedPulling="2025-11-28 17:01:50.385912283 +0000 UTC m=+6149.948696084" observedRunningTime="2025-11-28 17:01:50.984668965 +0000 UTC m=+6150.547452776" watchObservedRunningTime="2025-11-28 17:01:50.996968426 +0000 UTC m=+6150.559752247"
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.408723 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.469655 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-combined-ca-bundle\") pod \"443a471a-5e1a-45a6-9af3-ee911ce78535\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") "
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.469764 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data\") pod \"443a471a-5e1a-45a6-9af3-ee911ce78535\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") "
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.469822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-scripts\") pod \"443a471a-5e1a-45a6-9af3-ee911ce78535\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") "
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.469940 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data-merged\") pod \"443a471a-5e1a-45a6-9af3-ee911ce78535\" (UID: \"443a471a-5e1a-45a6-9af3-ee911ce78535\") "
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.475856 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data" (OuterVolumeSpecName: "config-data") pod "443a471a-5e1a-45a6-9af3-ee911ce78535" (UID: "443a471a-5e1a-45a6-9af3-ee911ce78535"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.476214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-scripts" (OuterVolumeSpecName: "scripts") pod "443a471a-5e1a-45a6-9af3-ee911ce78535" (UID: "443a471a-5e1a-45a6-9af3-ee911ce78535"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.503278 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "443a471a-5e1a-45a6-9af3-ee911ce78535" (UID: "443a471a-5e1a-45a6-9af3-ee911ce78535"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.504648 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "443a471a-5e1a-45a6-9af3-ee911ce78535" (UID: "443a471a-5e1a-45a6-9af3-ee911ce78535"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.572660 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data-merged\") on node \"crc\" DevicePath \"\""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.572687 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.572697 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.572705 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/443a471a-5e1a-45a6-9af3-ee911ce78535-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.997130 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-9wv6z" event={"ID":"443a471a-5e1a-45a6-9af3-ee911ce78535","Type":"ContainerDied","Data":"d2e771c67087c4ad1a4706f46920440cddd2971a0df37d2b5fc761d73c124d03"}
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.997185 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d2e771c67087c4ad1a4706f46920440cddd2971a0df37d2b5fc761d73c124d03"
Nov 28 17:01:52 crc kubenswrapper[4884]: I1128 17:01:52.997223 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-9wv6z"
Nov 28 17:02:02 crc kubenswrapper[4884]: I1128 17:02:02.892471 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-4r269"
Nov 28 17:02:13 crc kubenswrapper[4884]: I1128 17:02:13.047413 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-k9vr2"]
Nov 28 17:02:13 crc kubenswrapper[4884]: I1128 17:02:13.059996 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-k9vr2"]
Nov 28 17:02:14 crc kubenswrapper[4884]: I1128 17:02:14.701951 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86c08ed3-aae0-4c31-8c10-182e580b68e0" path="/var/lib/kubelet/pods/86c08ed3-aae0-4c31-8c10-182e580b68e0/volumes"
Nov 28 17:02:16 crc kubenswrapper[4884]: I1128 17:02:16.524589 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-9v4mf"]
Nov 28 17:02:16 crc kubenswrapper[4884]: I1128 17:02:16.525232 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerName="octavia-amphora-httpd" containerID="cri-o://1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9" gracePeriod=30
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.004597 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.182520 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-amphora-image\") pod \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") "
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.182737 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-httpd-config\") pod \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\" (UID: \"ff8d1590-5bd0-4ed2-8257-2bccaacc045c\") "
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.227497 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ff8d1590-5bd0-4ed2-8257-2bccaacc045c" (UID: "ff8d1590-5bd0-4ed2-8257-2bccaacc045c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.241275 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "ff8d1590-5bd0-4ed2-8257-2bccaacc045c" (UID: "ff8d1590-5bd0-4ed2-8257-2bccaacc045c"). InnerVolumeSpecName "amphora-image". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.253872 4884 generic.go:334] "Generic (PLEG): container finished" podID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerID="1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9" exitCode=0
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.253915 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" event={"ID":"ff8d1590-5bd0-4ed2-8257-2bccaacc045c","Type":"ContainerDied","Data":"1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9"}
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.253941 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-9v4mf" event={"ID":"ff8d1590-5bd0-4ed2-8257-2bccaacc045c","Type":"ContainerDied","Data":"fbd18dd740bf8b053be7812e39f2490bf011fa3c9acf5a3988f4a5f843b8817b"}
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.253960 4884 scope.go:117] "RemoveContainer" containerID="1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.253966 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-9v4mf"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.285496 4884 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-amphora-image\") on node \"crc\" DevicePath \"\""
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.285538 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ff8d1590-5bd0-4ed2-8257-2bccaacc045c-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.325447 4884 scope.go:117] "RemoveContainer" containerID="3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.332768 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-9v4mf"]
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.346790 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-9v4mf"]
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.349893 4884 scope.go:117] "RemoveContainer" containerID="1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9"
Nov 28 17:02:17 crc kubenswrapper[4884]: E1128 17:02:17.350527 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9\": container with ID starting with 1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9 not found: ID does not exist" containerID="1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.350603 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9"} err="failed to get container status \"1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9\": rpc error: code = NotFound desc = could not find container \"1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9\": container with ID starting with 1da599b354ee658a300ab44a1f17145099d4931b5299a4984c9042e600a223d9 not found: ID does not exist"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.350637 4884 scope.go:117] "RemoveContainer" containerID="3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3"
Nov 28 17:02:17 crc kubenswrapper[4884]: E1128 17:02:17.351609 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3\": container with ID starting with 3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3 not found: ID does not exist" containerID="3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3"
Nov 28 17:02:17 crc kubenswrapper[4884]: I1128 17:02:17.351653 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3"} err="failed to get container status \"3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3\": rpc error: code = NotFound desc = could not find container \"3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3\": container with ID starting with 3d290e162ba87f32465fe0f6624808343762b9021977e51bb89654b253a147a3 not found: ID does not exist"
Nov 28 17:02:18 crc kubenswrapper[4884]: I1128 17:02:18.701349 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" path="/var/lib/kubelet/pods/ff8d1590-5bd0-4ed2-8257-2bccaacc045c/volumes"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.272198 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7ltcn"]
Nov 28 17:02:20 crc kubenswrapper[4884]: E1128 17:02:20.273038 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerName="init"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.273073 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerName="init"
Nov 28 17:02:20 crc kubenswrapper[4884]: E1128 17:02:20.273142 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerName="octavia-amphora-httpd"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.273163 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerName="octavia-amphora-httpd"
Nov 28 17:02:20 crc kubenswrapper[4884]: E1128 17:02:20.273202 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerName="octavia-db-sync"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.273221 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerName="octavia-db-sync"
Nov 28 17:02:20 crc kubenswrapper[4884]: E1128 17:02:20.273267 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerName="init"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.273285 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerName="init"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.273750 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="443a471a-5e1a-45a6-9af3-ee911ce78535" containerName="octavia-db-sync"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.273812 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff8d1590-5bd0-4ed2-8257-2bccaacc045c" containerName="octavia-amphora-httpd"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.276458 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.279447 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.283528 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7ltcn"]
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.443989 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b820105-7d30-4fde-b776-ed3e9aaad018-httpd-config\") pod \"octavia-image-upload-59f8cff499-7ltcn\" (UID: \"1b820105-7d30-4fde-b776-ed3e9aaad018\") " pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.444454 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/1b820105-7d30-4fde-b776-ed3e9aaad018-amphora-image\") pod \"octavia-image-upload-59f8cff499-7ltcn\" (UID: \"1b820105-7d30-4fde-b776-ed3e9aaad018\") " pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.546448 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b820105-7d30-4fde-b776-ed3e9aaad018-httpd-config\") pod \"octavia-image-upload-59f8cff499-7ltcn\" (UID: \"1b820105-7d30-4fde-b776-ed3e9aaad018\") " pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.546552 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/1b820105-7d30-4fde-b776-ed3e9aaad018-amphora-image\") pod \"octavia-image-upload-59f8cff499-7ltcn\" (UID: \"1b820105-7d30-4fde-b776-ed3e9aaad018\") " pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.546913 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/1b820105-7d30-4fde-b776-ed3e9aaad018-amphora-image\") pod \"octavia-image-upload-59f8cff499-7ltcn\" (UID: \"1b820105-7d30-4fde-b776-ed3e9aaad018\") " pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.552246 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b820105-7d30-4fde-b776-ed3e9aaad018-httpd-config\") pod \"octavia-image-upload-59f8cff499-7ltcn\" (UID: \"1b820105-7d30-4fde-b776-ed3e9aaad018\") " pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:20 crc kubenswrapper[4884]: I1128 17:02:20.610198 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-7ltcn"
Nov 28 17:02:21 crc kubenswrapper[4884]: I1128 17:02:21.128634 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-7ltcn"]
Nov 28 17:02:21 crc kubenswrapper[4884]: W1128 17:02:21.128639 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b820105_7d30_4fde_b776_ed3e9aaad018.slice/crio-01a4997358ac69f78f704da38016c9211eed38cbf1f6dba68435bea7d3e2fcf5 WatchSource:0}: Error finding container 01a4997358ac69f78f704da38016c9211eed38cbf1f6dba68435bea7d3e2fcf5: Status 404 returned error can't find the container with id 01a4997358ac69f78f704da38016c9211eed38cbf1f6dba68435bea7d3e2fcf5
Nov 28 17:02:21 crc kubenswrapper[4884]: I1128 17:02:21.301537 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7ltcn" event={"ID":"1b820105-7d30-4fde-b776-ed3e9aaad018","Type":"ContainerStarted","Data":"01a4997358ac69f78f704da38016c9211eed38cbf1f6dba68435bea7d3e2fcf5"}
Nov 28 17:02:22 crc kubenswrapper[4884]: I1128 17:02:22.314813 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7ltcn" event={"ID":"1b820105-7d30-4fde-b776-ed3e9aaad018","Type":"ContainerStarted","Data":"a2e0c3b28c6cf49cea581520f8c4b66cc738a5d4da9a0dd1458b7f9f480c93eb"}
Nov 28 17:02:23 crc kubenswrapper[4884]: I1128 17:02:23.027227 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-7cc3-account-create-glf7r"]
Nov 28 17:02:23 crc kubenswrapper[4884]: I1128 17:02:23.036235 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-7cc3-account-create-glf7r"]
Nov 28 17:02:23 crc kubenswrapper[4884]: I1128 17:02:23.328025 4884 generic.go:334] "Generic (PLEG): container finished" podID="1b820105-7d30-4fde-b776-ed3e9aaad018" containerID="a2e0c3b28c6cf49cea581520f8c4b66cc738a5d4da9a0dd1458b7f9f480c93eb" exitCode=0
Nov 28 17:02:23 crc kubenswrapper[4884]: I1128 17:02:23.328067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7ltcn" event={"ID":"1b820105-7d30-4fde-b776-ed3e9aaad018","Type":"ContainerDied","Data":"a2e0c3b28c6cf49cea581520f8c4b66cc738a5d4da9a0dd1458b7f9f480c93eb"}
Nov 28 17:02:24 crc kubenswrapper[4884]: I1128 17:02:24.703750 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="814ec177-1088-4a0f-bda9-ee71c9cfa6eb" path="/var/lib/kubelet/pods/814ec177-1088-4a0f-bda9-ee71c9cfa6eb/volumes"
Nov 28 17:02:25 crc kubenswrapper[4884]: I1128 17:02:25.351449 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-7ltcn" event={"ID":"1b820105-7d30-4fde-b776-ed3e9aaad018","Type":"ContainerStarted","Data":"0aa1c4d45d6947b26f58d218e4ef9133328100fc2b2916913a5fffba831cb59c"}
Nov 28 17:02:25 crc kubenswrapper[4884]: I1128 17:02:25.404740 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-7ltcn" podStartSLOduration=2.171491643 podStartE2EDuration="5.404720149s" podCreationTimestamp="2025-11-28 17:02:20 +0000 UTC" firstStartedPulling="2025-11-28 17:02:21.131344218 +0000 UTC m=+6180.694128029" lastFinishedPulling="2025-11-28 17:02:24.364572734 +0000 UTC m=+6183.927356535" observedRunningTime="2025-11-28 17:02:25.397164822 +0000 UTC m=+6184.959948623" watchObservedRunningTime="2025-11-28 17:02:25.404720149 +0000 UTC m=+6184.967503960"
Nov 28 17:02:29 crc kubenswrapper[4884]: I1128 17:02:29.044271 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-gkc4s"]
Nov 28 17:02:29 crc kubenswrapper[4884]: I1128 17:02:29.056292 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-gkc4s"]
Nov 28 17:02:30 crc kubenswrapper[4884]: I1128 17:02:30.732766 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac7b1d50-de55-4c74-a567-e14c00170191" path="/var/lib/kubelet/pods/ac7b1d50-de55-4c74-a567-e14c00170191/volumes"
Nov 28 17:02:31 crc kubenswrapper[4884]: I1128 17:02:31.232621 4884 scope.go:117] "RemoveContainer" containerID="11cd64f702099bf1a995714987d4b861e5b308b2275edf37d9b80f933fb4233c"
Nov 28 17:02:31 crc kubenswrapper[4884]: I1128 17:02:31.276279 4884 scope.go:117] "RemoveContainer" containerID="9b9496870baeb91518dbeccea8a731f1a5a576163cb7b1f53a4e87f06bf713d3"
Nov 28 17:02:31 crc kubenswrapper[4884]: I1128 17:02:31.318346 4884 scope.go:117] "RemoveContainer" containerID="bb04380908c36683bd8fdfd47caf564a33f756299a4e4e720d4f56dc994517e6"
Nov 28 17:02:31 crc kubenswrapper[4884]: I1128 17:02:31.340586 4884 scope.go:117] "RemoveContainer" containerID="70e1168ea111bc97ffeb74f058eb86f3defc92c7b998be4b97f2a0a4a1243a28"
Nov 28 17:02:31 crc kubenswrapper[4884]: I1128 17:02:31.401389 4884 scope.go:117] "RemoveContainer" containerID="15199d40da9ba76d2b38a76cf92f4317e1da12546d0918483d3e0a779fc8c9c8"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.047457 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-qs24q"]
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.049860 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.052505 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.052684 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.053001 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.060368 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-qs24q"]
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.207178 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-scripts\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.207274 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-combined-ca-bundle\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.207342 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-config-data-merged\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.207371 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-amphora-certs\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.207442 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-config-data\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.207471 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-hm-ports\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.308950 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-config-data-merged\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.309007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-amphora-certs\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.309119 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-config-data\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.309153 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-hm-ports\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.309222 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-scripts\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.309334 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-combined-ca-bundle\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.309584 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-config-data-merged\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.310472 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-hm-ports\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.319583 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-scripts\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.325975 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-combined-ca-bundle\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.326747 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-config-data\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.338174 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/6fc7cf80-86a9-4afa-9070-9c04e72bd38a-amphora-certs\") pod \"octavia-healthmanager-qs24q\" (UID: \"6fc7cf80-86a9-4afa-9070-9c04e72bd38a\") " pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:38 crc kubenswrapper[4884]: I1128 17:02:38.369879 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.005638 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-qs24q"]
Nov 28 17:02:39 crc kubenswrapper[4884]: W1128 17:02:39.012602 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fc7cf80_86a9_4afa_9070_9c04e72bd38a.slice/crio-99e777bc12744ce7dff1ae5deb20264b821d5c295986ec1da3571b21d87d341a WatchSource:0}: Error finding container 99e777bc12744ce7dff1ae5deb20264b821d5c295986ec1da3571b21d87d341a: Status 404 returned error can't find the container with id 99e777bc12744ce7dff1ae5deb20264b821d5c295986ec1da3571b21d87d341a
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.336850 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-z2w2b"]
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.338566 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.342296 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.343995 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.351977 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-z2w2b"]
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.433872 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-scripts\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.434223 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-combined-ca-bundle\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.434309 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-config-data\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.434335 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-amphora-certs\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.434357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-config-data-merged\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.434375 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-hm-ports\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.520549 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-qs24q" event={"ID":"6fc7cf80-86a9-4afa-9070-9c04e72bd38a","Type":"ContainerStarted","Data":"99e777bc12744ce7dff1ae5deb20264b821d5c295986ec1da3571b21d87d341a"}
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.540984 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-scripts\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.541036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-combined-ca-bundle\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.541118 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-config-data\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.541142 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-amphora-certs\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.541161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-config-data-merged\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.541177 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-hm-ports\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.542205 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-hm-ports\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.543474 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-config-data-merged\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.552152 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-scripts\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.552205 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-config-data\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.552646 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-amphora-certs\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.552716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61a616f8-3c9b-4e6e-86dc-1009cfe68cc1-combined-ca-bundle\") pod \"octavia-housekeeping-z2w2b\" (UID: \"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1\") " pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:39 crc kubenswrapper[4884]: I1128 17:02:39.662959 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.033101 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-52gdk"]
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.035066 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.037284 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.037443 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.042056 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-52gdk"]
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.151807 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/ab7773db-d172-488a-9836-537d682406c3-hm-ports\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.151838 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-config-data\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.151858 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-amphora-certs\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.151872 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-combined-ca-bundle\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.151913 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/ab7773db-d172-488a-9836-537d682406c3-config-data-merged\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.151944 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-scripts\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.253418 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/ab7773db-d172-488a-9836-537d682406c3-hm-ports\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.253637 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-config-data\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.253654 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-combined-ca-bundle\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.253672 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-amphora-certs\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.253722 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/ab7773db-d172-488a-9836-537d682406c3-config-data-merged\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.253747 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-scripts\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.254417 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-z2w2b"]
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.254640 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/ab7773db-d172-488a-9836-537d682406c3-hm-ports\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.256052 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/ab7773db-d172-488a-9836-537d682406c3-config-data-merged\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.259891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-scripts\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.260909 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-config-data\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.261771 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-combined-ca-bundle\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.261967 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/ab7773db-d172-488a-9836-537d682406c3-amphora-certs\") pod \"octavia-worker-52gdk\" (UID: \"ab7773db-d172-488a-9836-537d682406c3\") " pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:40 crc kubenswrapper[4884]: I1128 17:02:40.363620 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-52gdk"
Nov 28 17:02:41 crc kubenswrapper[4884]: I1128 17:02:40.537344 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z2w2b" event={"ID":"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1","Type":"ContainerStarted","Data":"b13e580cbf3f03d881264790d7af4373ef2283b1d593b3bafa12827fdb088381"}
Nov 28 17:02:41 crc kubenswrapper[4884]: I1128 17:02:40.539067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-qs24q" event={"ID":"6fc7cf80-86a9-4afa-9070-9c04e72bd38a","Type":"ContainerStarted","Data":"029d25f1c06634329c71b7e33e535b5122d1b24d839abb5494450644c079ce10"}
Nov 28 17:02:41 crc kubenswrapper[4884]: I1128 17:02:41.551412 4884 generic.go:334] "Generic (PLEG): container finished" podID="6fc7cf80-86a9-4afa-9070-9c04e72bd38a" containerID="029d25f1c06634329c71b7e33e535b5122d1b24d839abb5494450644c079ce10" exitCode=0
Nov 28 17:02:41 crc kubenswrapper[4884]: I1128 17:02:41.551526 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-qs24q" event={"ID":"6fc7cf80-86a9-4afa-9070-9c04e72bd38a","Type":"ContainerDied","Data":"029d25f1c06634329c71b7e33e535b5122d1b24d839abb5494450644c079ce10"}
Nov 28 17:02:41 crc kubenswrapper[4884]: I1128 17:02:41.729366 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-52gdk"]
Nov 28 17:02:42 crc kubenswrapper[4884]: W1128 17:02:42.059366 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab7773db_d172_488a_9836_537d682406c3.slice/crio-e89c034cd7a4d43e2a899b93b6def1ec8a398a9c4b8c69e71df0481154c3f08f WatchSource:0}: Error finding container e89c034cd7a4d43e2a899b93b6def1ec8a398a9c4b8c69e71df0481154c3f08f: Status 404 returned error can't find the container with id e89c034cd7a4d43e2a899b93b6def1ec8a398a9c4b8c69e71df0481154c3f08f
Nov 28 17:02:42 crc kubenswrapper[4884]: I1128 17:02:42.562023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-52gdk" event={"ID":"ab7773db-d172-488a-9836-537d682406c3","Type":"ContainerStarted","Data":"e89c034cd7a4d43e2a899b93b6def1ec8a398a9c4b8c69e71df0481154c3f08f"}
Nov 28 17:02:43 crc kubenswrapper[4884]: I1128 17:02:43.601159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z2w2b" event={"ID":"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1","Type":"ContainerStarted","Data":"a50e767400c63b6d9bf8dfe0719e479612b8dc6b45468fae991cb97c729a83bc"}
Nov 28 17:02:43 crc kubenswrapper[4884]: I1128 17:02:43.605253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-qs24q" event={"ID":"6fc7cf80-86a9-4afa-9070-9c04e72bd38a","Type":"ContainerStarted","Data":"1fa9075bdfc74c03570555a828f1875dd52dca3c446b8b65440425611a39407f"}
Nov 28 17:02:43 crc kubenswrapper[4884]: I1128 17:02:43.607294 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-qs24q"
Nov 28 17:02:44 crc kubenswrapper[4884]: I1128 17:02:44.614516 4884 generic.go:334] "Generic (PLEG): container finished" podID="61a616f8-3c9b-4e6e-86dc-1009cfe68cc1" containerID="a50e767400c63b6d9bf8dfe0719e479612b8dc6b45468fae991cb97c729a83bc" exitCode=0
Nov 28 17:02:44 crc kubenswrapper[4884]: I1128 17:02:44.614632 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z2w2b" event={"ID":"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1","Type":"ContainerDied","Data":"a50e767400c63b6d9bf8dfe0719e479612b8dc6b45468fae991cb97c729a83bc"}
Nov 28 17:02:44 crc kubenswrapper[4884]: I1128 17:02:44.641835 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-qs24q" podStartSLOduration=6.641813107 podStartE2EDuration="6.641813107s" podCreationTimestamp="2025-11-28 17:02:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:02:43.678905173 +0000 UTC m=+6203.241688974" watchObservedRunningTime="2025-11-28 17:02:44.641813107 +0000 UTC m=+6204.204596918"
Nov 28 17:02:45 crc kubenswrapper[4884]: I1128 17:02:45.625913 4884 generic.go:334] "Generic (PLEG): container finished" podID="ab7773db-d172-488a-9836-537d682406c3" containerID="d18d1324fc19ea7c42c311b8b002ba889376281c85cbde96fa71541f9094487c" exitCode=0
Nov 28 17:02:45 crc kubenswrapper[4884]: I1128 17:02:45.626037 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-52gdk" event={"ID":"ab7773db-d172-488a-9836-537d682406c3","Type":"ContainerDied","Data":"d18d1324fc19ea7c42c311b8b002ba889376281c85cbde96fa71541f9094487c"}
Nov 28 17:02:45 crc kubenswrapper[4884]: I1128 17:02:45.630971 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-z2w2b" event={"ID":"61a616f8-3c9b-4e6e-86dc-1009cfe68cc1","Type":"ContainerStarted","Data":"18e186fdaa553d49e76cf701b4c16ba94633d36e92c8b3401fc8515f488ea47e"}
Nov 28 17:02:45 crc kubenswrapper[4884]: I1128 17:02:45.631179 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-z2w2b"
Nov 28 17:02:45 crc kubenswrapper[4884]: I1128 17:02:45.682761 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-z2w2b" podStartSLOduration=4.789928784 podStartE2EDuration="6.682734571s" podCreationTimestamp="2025-11-28 17:02:39 +0000 UTC" firstStartedPulling="2025-11-28 17:02:40.246773974 +0000 UTC m=+6199.809557775" lastFinishedPulling="2025-11-28 17:02:42.139579761 +0000 UTC m=+6201.702363562" observedRunningTime="2025-11-28 17:02:45.670430338 +0000 UTC m=+6205.233214149" watchObservedRunningTime="2025-11-28 17:02:45.682734571 +0000 UTC m=+6205.245518372"
Nov 28 17:02:46 crc kubenswrapper[4884]: I1128 17:02:46.640352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-52gdk" event={"ID":"ab7773db-d172-488a-9836-537d682406c3","Type":"ContainerStarted","Data":"9cc932a2b33a36e8ff02eb7981c6b348ad517c9f60faa64ffa22b56262265daf"}
Nov 28 17:02:46 crc kubenswrapper[4884]: I1128 17:02:46.674377 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-52gdk" podStartSLOduration=4.699937808 podStartE2EDuration="6.674359612s" podCreationTimestamp="2025-11-28 17:02:40 +0000 UTC" firstStartedPulling="2025-11-28 17:02:42.062426113 +0000 UTC m=+6201.625209924" lastFinishedPulling="2025-11-28 17:02:44.036847927 +0000 UTC m=+6203.599631728"
observedRunningTime="2025-11-28 17:02:46.666651572 +0000 UTC m=+6206.229435383" watchObservedRunningTime="2025-11-28 17:02:46.674359612 +0000 UTC m=+6206.237143413" Nov 28 17:02:47 crc kubenswrapper[4884]: I1128 17:02:47.649653 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-52gdk" Nov 28 17:02:53 crc kubenswrapper[4884]: I1128 17:02:53.403849 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-qs24q" Nov 28 17:02:54 crc kubenswrapper[4884]: I1128 17:02:54.715030 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-z2w2b" Nov 28 17:02:55 crc kubenswrapper[4884]: I1128 17:02:55.410458 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-52gdk" Nov 28 17:02:56 crc kubenswrapper[4884]: I1128 17:02:56.034269 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-fxdpb"] Nov 28 17:02:56 crc kubenswrapper[4884]: I1128 17:02:56.045836 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-fxdpb"] Nov 28 17:02:56 crc kubenswrapper[4884]: I1128 17:02:56.716601 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8fe0cc2-1f0e-4660-8ca0-5b34ce822408" path="/var/lib/kubelet/pods/a8fe0cc2-1f0e-4660-8ca0-5b34ce822408/volumes" Nov 28 17:03:06 crc kubenswrapper[4884]: I1128 17:03:06.046782 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6465-account-create-z4qc9"] Nov 28 17:03:06 crc kubenswrapper[4884]: I1128 17:03:06.056437 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6465-account-create-z4qc9"] Nov 28 17:03:06 crc kubenswrapper[4884]: I1128 17:03:06.700298 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba" path="/var/lib/kubelet/pods/fc2a1ad9-8fbf-47c7-809e-0ed7ce6232ba/volumes" Nov 28 17:03:15 crc kubenswrapper[4884]: I1128 17:03:15.031449 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-rxcmj"] Nov 28 17:03:15 crc kubenswrapper[4884]: I1128 17:03:15.042789 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-rxcmj"] Nov 28 17:03:16 crc kubenswrapper[4884]: I1128 17:03:16.702862 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="395cfd62-9b8e-429a-9b4b-b583eb48d067" path="/var/lib/kubelet/pods/395cfd62-9b8e-429a-9b4b-b583eb48d067/volumes" Nov 28 17:03:21 crc kubenswrapper[4884]: I1128 17:03:21.243193 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:03:21 crc kubenswrapper[4884]: I1128 17:03:21.243522 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:03:31 crc kubenswrapper[4884]: I1128 17:03:31.576750 4884 scope.go:117] "RemoveContainer" containerID="267dc7eec10be1d22a822a1f997b12266412531de59a25fffb7cd9cd229267ab" Nov 28 17:03:31 crc 
kubenswrapper[4884]: I1128 17:03:31.669016 4884 scope.go:117] "RemoveContainer" containerID="d05acd59d4241883c7d194743b63c4f8b688ebc3a4a6cf30c6896186b9ae949e" Nov 28 17:03:31 crc kubenswrapper[4884]: I1128 17:03:31.720475 4884 scope.go:117] "RemoveContainer" containerID="03efcc0c1514cc7ee56e1e64bd8fb5e9dd46cc3b6bac98ec090f810fc14ed8fb" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.102777 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fff568957-wdllx"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.105033 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.114258 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.114260 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.114349 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.114438 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-tfnnt" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.130743 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fff568957-wdllx"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.175161 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.175553 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-httpd" containerID="cri-o://db76e2b33ac3a57a0cd8b028e01644f3dd2972554275c793bebf94fae884c6a7" gracePeriod=30 Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.176036 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-log" containerID="cri-o://0297fac66f6f8eda976c4bbc4c1711d9be1fa6137e73637339b9197cfc0a4ea5" gracePeriod=30 Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.243923 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.244280 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-log" containerID="cri-o://0f956f1363b968b23f07aa39f48df67e0908b751738b327949b2377feee988b1" gracePeriod=30 Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.244403 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-httpd" containerID="cri-o://d05f78462ca296c523d1efae6ccf83fbeda3fad2ac62f298320d0f376a46123c" gracePeriod=30 Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.259694 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-868cd77ff5-7g99m"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.261344 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.273146 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.273211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-logs\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.273271 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-scripts\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.273331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrp9q\" (UniqueName: \"kubernetes.io/projected/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-kube-api-access-hrp9q\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.273358 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-horizon-secret-key\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.290306 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-868cd77ff5-7g99m"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.375776 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-config-data\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376349 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/632de331-6d6e-4351-83d1-1e722a8a634e-horizon-secret-key\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrp9q\" (UniqueName: \"kubernetes.io/projected/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-kube-api-access-hrp9q\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376438 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-horizon-secret-key\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376497 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/632de331-6d6e-4351-83d1-1e722a8a634e-logs\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376561 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376622 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xqtg\" (UniqueName: \"kubernetes.io/projected/632de331-6d6e-4351-83d1-1e722a8a634e-kube-api-access-2xqtg\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376692 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-logs\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376803 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-scripts\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.376836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-scripts\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.377690 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-logs\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.377757 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-scripts\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.378597 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data\") pod \"horizon-5fff568957-wdllx\" (UID: 
\"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.383970 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-horizon-secret-key\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.392997 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrp9q\" (UniqueName: \"kubernetes.io/projected/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-kube-api-access-hrp9q\") pod \"horizon-5fff568957-wdllx\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.431416 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.484490 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-scripts\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.484561 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-config-data\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.484579 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/632de331-6d6e-4351-83d1-1e722a8a634e-horizon-secret-key\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.484625 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/632de331-6d6e-4351-83d1-1e722a8a634e-logs\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.484678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xqtg\" (UniqueName: \"kubernetes.io/projected/632de331-6d6e-4351-83d1-1e722a8a634e-kube-api-access-2xqtg\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.485504 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/632de331-6d6e-4351-83d1-1e722a8a634e-logs\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.485849 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-scripts\") pod \"horizon-868cd77ff5-7g99m\" (UID: 
\"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.486823 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-config-data\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.488921 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/632de331-6d6e-4351-83d1-1e722a8a634e-horizon-secret-key\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.498947 4884 generic.go:334] "Generic (PLEG): container finished" podID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerID="0297fac66f6f8eda976c4bbc4c1711d9be1fa6137e73637339b9197cfc0a4ea5" exitCode=143 Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.499022 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f05bc4a-3dba-42a9-b784-7ff51f6b078d","Type":"ContainerDied","Data":"0297fac66f6f8eda976c4bbc4c1711d9be1fa6137e73637339b9197cfc0a4ea5"} Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.501430 4884 generic.go:334] "Generic (PLEG): container finished" podID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerID="0f956f1363b968b23f07aa39f48df67e0908b751738b327949b2377feee988b1" exitCode=143 Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.501487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5281e6a8-491d-4814-93e9-b93604eeb4a6","Type":"ContainerDied","Data":"0f956f1363b968b23f07aa39f48df67e0908b751738b327949b2377feee988b1"} Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.503053 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xqtg\" (UniqueName: \"kubernetes.io/projected/632de331-6d6e-4351-83d1-1e722a8a634e-kube-api-access-2xqtg\") pod \"horizon-868cd77ff5-7g99m\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") " pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.642539 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.833352 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fff568957-wdllx"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.894893 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-86747bdd49-2sdfl"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.898430 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.913238 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86747bdd49-2sdfl"] Nov 28 17:03:49 crc kubenswrapper[4884]: I1128 17:03:49.963772 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fff568957-wdllx"] Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.000829 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-config-data\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.001051 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-logs\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.001176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-horizon-secret-key\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.001241 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-scripts\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.001281 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6mjn\" (UniqueName: \"kubernetes.io/projected/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-kube-api-access-v6mjn\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.102817 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-horizon-secret-key\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.102893 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-scripts\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.102932 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6mjn\" (UniqueName: \"kubernetes.io/projected/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-kube-api-access-v6mjn\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: 
I1128 17:03:50.103031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-config-data\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.103046 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-logs\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.103443 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-logs\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.103903 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-scripts\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.104718 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-config-data\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.108923 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-horizon-secret-key\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.119300 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6mjn\" (UniqueName: \"kubernetes.io/projected/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-kube-api-access-v6mjn\") pod \"horizon-86747bdd49-2sdfl\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.194997 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-868cd77ff5-7g99m"] Nov 28 17:03:50 crc kubenswrapper[4884]: W1128 17:03:50.196010 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod632de331_6d6e_4351_83d1_1e722a8a634e.slice/crio-a885c3bed3e0a2b54d6f05985aadf71b6080958c771040180b25c09381b62151 WatchSource:0}: Error finding container a885c3bed3e0a2b54d6f05985aadf71b6080958c771040180b25c09381b62151: Status 404 returned error can't find the container with id a885c3bed3e0a2b54d6f05985aadf71b6080958c771040180b25c09381b62151 Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.227886 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.515144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fff568957-wdllx" event={"ID":"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757","Type":"ContainerStarted","Data":"d9f3429817bd28a35474a5acf222a3a6ebcabbe0e836d46dd19bd52b16f082e3"} Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.529957 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-868cd77ff5-7g99m" event={"ID":"632de331-6d6e-4351-83d1-1e722a8a634e","Type":"ContainerStarted","Data":"a885c3bed3e0a2b54d6f05985aadf71b6080958c771040180b25c09381b62151"} Nov 28 17:03:50 crc kubenswrapper[4884]: I1128 17:03:50.802145 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86747bdd49-2sdfl"] Nov 28 17:03:51 crc kubenswrapper[4884]: I1128 17:03:51.242842 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:03:51 crc kubenswrapper[4884]: I1128 17:03:51.242891 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:03:51 crc kubenswrapper[4884]: I1128 17:03:51.550950 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86747bdd49-2sdfl" event={"ID":"f81387d2-8047-467c-b9e2-28ceaeb0b4f9","Type":"ContainerStarted","Data":"34c18676bd868eb3c53edf5842af4bd12a0a3309ef4b4684d9d6d62d8deaf120"} Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.316985 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.1.42:9292/healthcheck\": read tcp 10.217.0.2:51876->10.217.1.42:9292: read: connection reset by peer" Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.317050 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.1.42:9292/healthcheck\": read tcp 10.217.0.2:51874->10.217.1.42:9292: read: connection reset by peer" Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.405948 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.1.43:9292/healthcheck\": read tcp 10.217.0.2:36600->10.217.1.43:9292: read: connection reset by peer" Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.405995 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.1.43:9292/healthcheck\": read tcp 10.217.0.2:36590->10.217.1.43:9292: read: connection reset by peer" Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.565146 4884 
generic.go:334] "Generic (PLEG): container finished" podID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerID="db76e2b33ac3a57a0cd8b028e01644f3dd2972554275c793bebf94fae884c6a7" exitCode=0 Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.565224 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f05bc4a-3dba-42a9-b784-7ff51f6b078d","Type":"ContainerDied","Data":"db76e2b33ac3a57a0cd8b028e01644f3dd2972554275c793bebf94fae884c6a7"} Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.568697 4884 generic.go:334] "Generic (PLEG): container finished" podID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerID="d05f78462ca296c523d1efae6ccf83fbeda3fad2ac62f298320d0f376a46123c" exitCode=0 Nov 28 17:03:52 crc kubenswrapper[4884]: I1128 17:03:52.568735 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5281e6a8-491d-4814-93e9-b93604eeb4a6","Type":"ContainerDied","Data":"d05f78462ca296c523d1efae6ccf83fbeda3fad2ac62f298320d0f376a46123c"} Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.074646 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-df5hv"] Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.088377 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-df5hv"] Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.460118 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.467397 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.525532 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-logs\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.531750 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-logs" (OuterVolumeSpecName: "logs") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627000 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-ceph\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627066 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-httpd-run\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627107 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-scripts\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627186 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-config-data\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627219 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-combined-ca-bundle\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627249 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-config-data\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627268 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-logs\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627290 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnlcj\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-kube-api-access-mnlcj\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627309 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-httpd-run\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627328 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-combined-ca-bundle\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc 
kubenswrapper[4884]: I1128 17:03:57.627347 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-scripts\") pod \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\" (UID: \"8f05bc4a-3dba-42a9-b784-7ff51f6b078d\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627369 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhrxk\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-kube-api-access-bhrxk\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627408 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-ceph\") pod \"5281e6a8-491d-4814-93e9-b93604eeb4a6\" (UID: \"5281e6a8-491d-4814-93e9-b93604eeb4a6\") " Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.627757 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.628892 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.628758 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.629074 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-logs" (OuterVolumeSpecName: "logs") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.631061 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5281e6a8-491d-4814-93e9-b93604eeb4a6","Type":"ContainerDied","Data":"c33b6d96e9f0b25ea6149f193f59862be404e5f4f845aa746f66034d9beb7bd1"} Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.631150 4884 scope.go:117] "RemoveContainer" containerID="d05f78462ca296c523d1efae6ccf83fbeda3fad2ac62f298320d0f376a46123c" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.631116 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.634720 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-ceph" (OuterVolumeSpecName: "ceph") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). 
InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.635201 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-kube-api-access-bhrxk" (OuterVolumeSpecName: "kube-api-access-bhrxk") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). InnerVolumeSpecName "kube-api-access-bhrxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.635787 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-scripts" (OuterVolumeSpecName: "scripts") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.636254 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-kube-api-access-mnlcj" (OuterVolumeSpecName: "kube-api-access-mnlcj") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "kube-api-access-mnlcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.638363 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"8f05bc4a-3dba-42a9-b784-7ff51f6b078d","Type":"ContainerDied","Data":"75b28bf636b183561d3131047d061f7dcbf567111537dead09a1466b6498268e"} Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.638858 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.640271 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-scripts" (OuterVolumeSpecName: "scripts") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.640451 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-ceph" (OuterVolumeSpecName: "ceph") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.686884 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.717764 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.725946 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-config-data" (OuterVolumeSpecName: "config-data") pod "8f05bc4a-3dba-42a9-b784-7ff51f6b078d" (UID: "8f05bc4a-3dba-42a9-b784-7ff51f6b078d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728888 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728910 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728919 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728928 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728937 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728948 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728956 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728964 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnlcj\" (UniqueName: \"kubernetes.io/projected/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-kube-api-access-mnlcj\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728972 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5281e6a8-491d-4814-93e9-b93604eeb4a6-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728979 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc 
kubenswrapper[4884]: I1128 17:03:57.728988 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f05bc4a-3dba-42a9-b784-7ff51f6b078d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.728996 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhrxk\" (UniqueName: \"kubernetes.io/projected/5281e6a8-491d-4814-93e9-b93604eeb4a6-kube-api-access-bhrxk\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.729688 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-config-data" (OuterVolumeSpecName: "config-data") pod "5281e6a8-491d-4814-93e9-b93604eeb4a6" (UID: "5281e6a8-491d-4814-93e9-b93604eeb4a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.769665 4884 scope.go:117] "RemoveContainer" containerID="0f956f1363b968b23f07aa39f48df67e0908b751738b327949b2377feee988b1" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.788617 4884 scope.go:117] "RemoveContainer" containerID="db76e2b33ac3a57a0cd8b028e01644f3dd2972554275c793bebf94fae884c6a7" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.830307 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5281e6a8-491d-4814-93e9-b93604eeb4a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.831593 4884 scope.go:117] "RemoveContainer" containerID="0297fac66f6f8eda976c4bbc4c1711d9be1fa6137e73637339b9197cfc0a4ea5" Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.968150 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:03:57 crc kubenswrapper[4884]: I1128 17:03:57.979941 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.004381 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054182 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: E1128 17:03:58.054622 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-log" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054640 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-log" Nov 28 17:03:58 crc kubenswrapper[4884]: E1128 17:03:58.054657 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-httpd" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054663 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-httpd" Nov 28 17:03:58 crc kubenswrapper[4884]: E1128 17:03:58.054695 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-httpd" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054702 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" 
containerName="glance-httpd" Nov 28 17:03:58 crc kubenswrapper[4884]: E1128 17:03:58.054714 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-log" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054719 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-log" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054904 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-log" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054924 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-httpd" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054934 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" containerName="glance-httpd" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.054944 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" containerName="glance-log" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.056077 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.058309 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.060906 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-ww7sc" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.062224 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.081543 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.115004 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.122880 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.124668 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.127017 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.130866 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.151687 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.151775 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-logs\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.151936 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/55cff4b4-3b59-43d0-8932-2407e4417f81-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.151983 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152005 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/55cff4b4-3b59-43d0-8932-2407e4417f81-ceph\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152064 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jgmw\" (UniqueName: \"kubernetes.io/projected/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-kube-api-access-2jgmw\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152141 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-scripts\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152171 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-config-data\") pod \"glance-default-external-api-0\" (UID: 
\"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152446 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-config-data\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk2vz\" (UniqueName: \"kubernetes.io/projected/55cff4b4-3b59-43d0-8932-2407e4417f81-kube-api-access-pk2vz\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152583 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55cff4b4-3b59-43d0-8932-2407e4417f81-logs\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152618 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152656 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-ceph\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.152777 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-scripts\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254788 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-config-data\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254830 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk2vz\" (UniqueName: \"kubernetes.io/projected/55cff4b4-3b59-43d0-8932-2407e4417f81-kube-api-access-pk2vz\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254852 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55cff4b4-3b59-43d0-8932-2407e4417f81-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254869 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254886 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-ceph\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254923 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-scripts\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.254981 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-logs\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255049 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/55cff4b4-3b59-43d0-8932-2407e4417f81-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/55cff4b4-3b59-43d0-8932-2407e4417f81-ceph\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255120 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255150 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jgmw\" (UniqueName: \"kubernetes.io/projected/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-kube-api-access-2jgmw\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " 
pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255185 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-scripts\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.255215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-config-data\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.256279 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-logs\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.256301 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.256398 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/55cff4b4-3b59-43d0-8932-2407e4417f81-logs\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.256603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/55cff4b4-3b59-43d0-8932-2407e4417f81-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.258845 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-scripts\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.260340 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-config-data\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.261516 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/55cff4b4-3b59-43d0-8932-2407e4417f81-ceph\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.262375 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.262922 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55cff4b4-3b59-43d0-8932-2407e4417f81-config-data\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.263604 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-scripts\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.269807 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.270843 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-ceph\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.273566 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk2vz\" (UniqueName: \"kubernetes.io/projected/55cff4b4-3b59-43d0-8932-2407e4417f81-kube-api-access-pk2vz\") pod \"glance-default-internal-api-0\" (UID: \"55cff4b4-3b59-43d0-8932-2407e4417f81\") " pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.274758 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jgmw\" (UniqueName: \"kubernetes.io/projected/df5003b3-9cce-4245-9b5a-1d6fb634d2e1-kube-api-access-2jgmw\") pod \"glance-default-external-api-0\" (UID: \"df5003b3-9cce-4245-9b5a-1d6fb634d2e1\") " pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.392434 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.452702 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.672429 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fff568957-wdllx" event={"ID":"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757","Type":"ContainerStarted","Data":"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2"} Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.672760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fff568957-wdllx" event={"ID":"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757","Type":"ContainerStarted","Data":"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2"} Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.672907 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fff568957-wdllx" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon-log" containerID="cri-o://b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2" gracePeriod=30 Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.673424 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fff568957-wdllx" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon" containerID="cri-o://d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2" gracePeriod=30 Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.709311 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5281e6a8-491d-4814-93e9-b93604eeb4a6" path="/var/lib/kubelet/pods/5281e6a8-491d-4814-93e9-b93604eeb4a6/volumes" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.710196 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6992d312-2342-4862-8838-7158270733a3" path="/var/lib/kubelet/pods/6992d312-2342-4862-8838-7158270733a3/volumes" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.710864 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f05bc4a-3dba-42a9-b784-7ff51f6b078d" path="/var/lib/kubelet/pods/8f05bc4a-3dba-42a9-b784-7ff51f6b078d/volumes" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.713211 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-868cd77ff5-7g99m" event={"ID":"632de331-6d6e-4351-83d1-1e722a8a634e","Type":"ContainerStarted","Data":"623589a4a8e74e6b5f88467f318f0f5b5f4f373dd6c11a40b7aad44cb4fd6178"} Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.713249 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-868cd77ff5-7g99m" event={"ID":"632de331-6d6e-4351-83d1-1e722a8a634e","Type":"ContainerStarted","Data":"4347858c9b50b3471f23cfdeb5098a9b0f4d7f1ed0cbeae7b320c4454b825ffa"} Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.716013 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5fff568957-wdllx" podStartSLOduration=2.123651311 podStartE2EDuration="9.715993237s" podCreationTimestamp="2025-11-28 17:03:49 +0000 UTC" firstStartedPulling="2025-11-28 17:03:49.955147619 +0000 UTC m=+6269.517931420" lastFinishedPulling="2025-11-28 17:03:57.547489545 +0000 UTC m=+6277.110273346" observedRunningTime="2025-11-28 17:03:58.695367879 +0000 UTC m=+6278.258151680" watchObservedRunningTime="2025-11-28 17:03:58.715993237 +0000 UTC m=+6278.278777038" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.723314 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86747bdd49-2sdfl" 
event={"ID":"f81387d2-8047-467c-b9e2-28ceaeb0b4f9","Type":"ContainerStarted","Data":"a6257017442437d8db310e74ed1dbb7e46269f5be18815edf3bfcb49aef095e2"} Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.723364 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86747bdd49-2sdfl" event={"ID":"f81387d2-8047-467c-b9e2-28ceaeb0b4f9","Type":"ContainerStarted","Data":"36ef2eec058c3307eb5a977993715b1e71a570be74ba2b41ce5046aa63826303"} Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.747767 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-868cd77ff5-7g99m" podStartSLOduration=2.490238238 podStartE2EDuration="9.747738738s" podCreationTimestamp="2025-11-28 17:03:49 +0000 UTC" firstStartedPulling="2025-11-28 17:03:50.198847263 +0000 UTC m=+6269.761631084" lastFinishedPulling="2025-11-28 17:03:57.456347783 +0000 UTC m=+6277.019131584" observedRunningTime="2025-11-28 17:03:58.728802042 +0000 UTC m=+6278.291585843" watchObservedRunningTime="2025-11-28 17:03:58.747738738 +0000 UTC m=+6278.310522539" Nov 28 17:03:58 crc kubenswrapper[4884]: I1128 17:03:58.781958 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-86747bdd49-2sdfl" podStartSLOduration=3.142162842 podStartE2EDuration="9.781938149s" podCreationTimestamp="2025-11-28 17:03:49 +0000 UTC" firstStartedPulling="2025-11-28 17:03:50.818561416 +0000 UTC m=+6270.381345217" lastFinishedPulling="2025-11-28 17:03:57.458336683 +0000 UTC m=+6277.021120524" observedRunningTime="2025-11-28 17:03:58.760290527 +0000 UTC m=+6278.323074338" watchObservedRunningTime="2025-11-28 17:03:58.781938149 +0000 UTC m=+6278.344721940" Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.042394 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.141685 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 17:03:59 crc kubenswrapper[4884]: W1128 17:03:59.153873 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf5003b3_9cce_4245_9b5a_1d6fb634d2e1.slice/crio-37756eb3444758b7d82c2f365bb387610d604c4079beac2dde38dfc3cdc50292 WatchSource:0}: Error finding container 37756eb3444758b7d82c2f365bb387610d604c4079beac2dde38dfc3cdc50292: Status 404 returned error can't find the container with id 37756eb3444758b7d82c2f365bb387610d604c4079beac2dde38dfc3cdc50292 Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.432329 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.643231 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.643364 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.735931 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"55cff4b4-3b59-43d0-8932-2407e4417f81","Type":"ContainerStarted","Data":"78072885c5addbb8e91064e1b012093ed5fb2ab9083c51b0e95b421e3f787247"} Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.735981 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"55cff4b4-3b59-43d0-8932-2407e4417f81","Type":"ContainerStarted","Data":"07358ccfd9ae58eb838be19a3bb920a6ba6a28e19b1be23c12644b57e6dc0c4a"} Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.737730 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"df5003b3-9cce-4245-9b5a-1d6fb634d2e1","Type":"ContainerStarted","Data":"379bcbe4d28c6ae7e5240b7b69b4dc6a25e98ce7bd0fc6fad0fe81fb88293669"} Nov 28 17:03:59 crc kubenswrapper[4884]: I1128 17:03:59.737761 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"df5003b3-9cce-4245-9b5a-1d6fb634d2e1","Type":"ContainerStarted","Data":"37756eb3444758b7d82c2f365bb387610d604c4079beac2dde38dfc3cdc50292"} Nov 28 17:04:00 crc kubenswrapper[4884]: I1128 17:04:00.228824 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:04:00 crc kubenswrapper[4884]: I1128 17:04:00.230081 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:04:00 crc kubenswrapper[4884]: I1128 17:04:00.756110 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"df5003b3-9cce-4245-9b5a-1d6fb634d2e1","Type":"ContainerStarted","Data":"f65dc54a0e9b1d7e198d5a0bf61b2602df0ae0f6364679692dd20887a2bc6529"} Nov 28 17:04:00 crc kubenswrapper[4884]: I1128 17:04:00.759129 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"55cff4b4-3b59-43d0-8932-2407e4417f81","Type":"ContainerStarted","Data":"0d017f5fe20f0fa4111c5a03adee6c222f0adbb1b1fd2c5e1bcb71b0fc2df217"} Nov 28 17:04:00 crc kubenswrapper[4884]: I1128 17:04:00.789588 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.7895586999999997 podStartE2EDuration="3.7895587s" podCreationTimestamp="2025-11-28 17:03:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:04:00.784598357 +0000 UTC m=+6280.347382198" watchObservedRunningTime="2025-11-28 17:04:00.7895587 +0000 UTC m=+6280.352342521" Nov 28 17:04:00 crc kubenswrapper[4884]: I1128 17:04:00.822676 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.822651914 podStartE2EDuration="3.822651914s" podCreationTimestamp="2025-11-28 17:03:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:04:00.810548536 +0000 UTC m=+6280.373332357" watchObservedRunningTime="2025-11-28 17:04:00.822651914 +0000 UTC m=+6280.385435735" Nov 28 17:04:07 crc kubenswrapper[4884]: I1128 17:04:07.045206 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-4b7e-account-create-2j7zm"] Nov 28 17:04:07 crc kubenswrapper[4884]: I1128 17:04:07.058324 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-4b7e-account-create-2j7zm"] Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.392838 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.394760 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.430912 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.446455 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.457230 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.457311 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.500243 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.501930 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.702511 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="becf87c0-e723-40cb-aaae-294092345f12" path="/var/lib/kubelet/pods/becf87c0-e723-40cb-aaae-294092345f12/volumes" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.841050 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.841352 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.841477 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 17:04:08 crc kubenswrapper[4884]: I1128 17:04:08.841574 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:09 crc kubenswrapper[4884]: I1128 17:04:09.644765 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-868cd77ff5-7g99m" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:04:10 crc kubenswrapper[4884]: I1128 17:04:10.229652 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-86747bdd49-2sdfl" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.113:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.113:8080: connect: connection refused" Nov 28 17:04:10 crc kubenswrapper[4884]: I1128 17:04:10.890080 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 17:04:10 crc kubenswrapper[4884]: I1128 17:04:10.890248 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 17:04:10 crc kubenswrapper[4884]: I1128 17:04:10.934482 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 17:04:11 crc kubenswrapper[4884]: I1128 17:04:11.235735 4884 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:11 crc kubenswrapper[4884]: I1128 17:04:11.235930 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 17:04:11 crc kubenswrapper[4884]: I1128 17:04:11.239513 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 17:04:15 crc kubenswrapper[4884]: I1128 17:04:15.027046 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-lxfgf"] Nov 28 17:04:15 crc kubenswrapper[4884]: I1128 17:04:15.036642 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-lxfgf"] Nov 28 17:04:16 crc kubenswrapper[4884]: I1128 17:04:16.701477 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf9d842a-9211-40df-9360-d1d46ca4b8be" path="/var/lib/kubelet/pods/bf9d842a-9211-40df-9360-d1d46ca4b8be/volumes" Nov 28 17:04:21 crc kubenswrapper[4884]: I1128 17:04:21.242766 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:04:21 crc kubenswrapper[4884]: I1128 17:04:21.243477 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:04:21 crc kubenswrapper[4884]: I1128 17:04:21.243591 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:04:21 crc kubenswrapper[4884]: I1128 17:04:21.244556 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b85eb04dabfd5877cb92540e0cb996b7befe8c09494a73f2d3f896e81d9d08d7"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:04:21 crc kubenswrapper[4884]: I1128 17:04:21.244622 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://b85eb04dabfd5877cb92540e0cb996b7befe8c09494a73f2d3f896e81d9d08d7" gracePeriod=600 Nov 28 17:04:21 crc kubenswrapper[4884]: I1128 17:04:21.619557 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:04:22 crc kubenswrapper[4884]: I1128 17:04:22.027697 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="b85eb04dabfd5877cb92540e0cb996b7befe8c09494a73f2d3f896e81d9d08d7" exitCode=0 Nov 28 17:04:22 crc kubenswrapper[4884]: I1128 17:04:22.027748 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"b85eb04dabfd5877cb92540e0cb996b7befe8c09494a73f2d3f896e81d9d08d7"} Nov 28 17:04:22 crc 
kubenswrapper[4884]: I1128 17:04:22.027786 4884 scope.go:117] "RemoveContainer" containerID="0b2d3cc35504a1983eeb60a4b1dc59183ed7c475331868beb28a3b1aa1b9e027" Nov 28 17:04:22 crc kubenswrapper[4884]: I1128 17:04:22.253747 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:04:23 crc kubenswrapper[4884]: I1128 17:04:23.041724 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"} Nov 28 17:04:23 crc kubenswrapper[4884]: I1128 17:04:23.305541 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:04:24 crc kubenswrapper[4884]: I1128 17:04:24.017685 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:04:24 crc kubenswrapper[4884]: I1128 17:04:24.068119 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-868cd77ff5-7g99m"] Nov 28 17:04:24 crc kubenswrapper[4884]: I1128 17:04:24.068367 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-868cd77ff5-7g99m" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon-log" containerID="cri-o://4347858c9b50b3471f23cfdeb5098a9b0f4d7f1ed0cbeae7b320c4454b825ffa" gracePeriod=30 Nov 28 17:04:24 crc kubenswrapper[4884]: I1128 17:04:24.068482 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-868cd77ff5-7g99m" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon" containerID="cri-o://623589a4a8e74e6b5f88467f318f0f5b5f4f373dd6c11a40b7aad44cb4fd6178" gracePeriod=30 Nov 28 17:04:28 crc kubenswrapper[4884]: I1128 17:04:28.093026 4884 generic.go:334] "Generic (PLEG): container finished" podID="632de331-6d6e-4351-83d1-1e722a8a634e" containerID="623589a4a8e74e6b5f88467f318f0f5b5f4f373dd6c11a40b7aad44cb4fd6178" exitCode=0 Nov 28 17:04:28 crc kubenswrapper[4884]: I1128 17:04:28.093069 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-868cd77ff5-7g99m" event={"ID":"632de331-6d6e-4351-83d1-1e722a8a634e","Type":"ContainerDied","Data":"623589a4a8e74e6b5f88467f318f0f5b5f4f373dd6c11a40b7aad44cb4fd6178"} Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.100903 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118180 4884 generic.go:334] "Generic (PLEG): container finished" podID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerID="d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2" exitCode=137 Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118212 4884 generic.go:334] "Generic (PLEG): container finished" podID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerID="b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2" exitCode=137 Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118234 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fff568957-wdllx" event={"ID":"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757","Type":"ContainerDied","Data":"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2"} Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118261 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fff568957-wdllx" event={"ID":"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757","Type":"ContainerDied","Data":"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2"} Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118271 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fff568957-wdllx" event={"ID":"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757","Type":"ContainerDied","Data":"d9f3429817bd28a35474a5acf222a3a6ebcabbe0e836d46dd19bd52b16f082e3"} Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118278 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fff568957-wdllx" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.118288 4884 scope.go:117] "RemoveContainer" containerID="d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.135975 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data\") pod \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.136058 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-logs\") pod \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.136100 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrp9q\" (UniqueName: \"kubernetes.io/projected/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-kube-api-access-hrp9q\") pod \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.136152 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-scripts\") pod \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.136930 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-horizon-secret-key\") pod 
\"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.137055 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-logs" (OuterVolumeSpecName: "logs") pod "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" (UID: "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.137577 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.154004 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" (UID: "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.154431 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-kube-api-access-hrp9q" (OuterVolumeSpecName: "kube-api-access-hrp9q") pod "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" (UID: "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757"). InnerVolumeSpecName "kube-api-access-hrp9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:04:29 crc kubenswrapper[4884]: E1128 17:04:29.162623 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data podName:6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757 nodeName:}" failed. No retries permitted until 2025-11-28 17:04:29.662590373 +0000 UTC m=+6309.225374174 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data") pod "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" (UID: "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757") : error deleting /var/lib/kubelet/pods/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757/volume-subpaths: remove /var/lib/kubelet/pods/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757/volume-subpaths: no such file or directory Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.162823 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-scripts" (OuterVolumeSpecName: "scripts") pod "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" (UID: "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.239606 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrp9q\" (UniqueName: \"kubernetes.io/projected/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-kube-api-access-hrp9q\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.239642 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.239681 4884 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.328879 4884 scope.go:117] "RemoveContainer" containerID="b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.348990 4884 scope.go:117] "RemoveContainer" containerID="d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2" Nov 28 17:04:29 crc kubenswrapper[4884]: E1128 17:04:29.349442 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2\": container with ID starting with d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2 not found: ID does not exist" containerID="d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.349479 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2"} err="failed to get container status \"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2\": rpc error: code = NotFound desc = could not find container \"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2\": container with ID starting with d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2 not found: ID does not exist" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.349512 4884 scope.go:117] "RemoveContainer" containerID="b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2" Nov 28 17:04:29 crc kubenswrapper[4884]: E1128 17:04:29.349850 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2\": container with ID starting with b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2 not found: ID does not exist" containerID="b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.349881 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2"} err="failed to get container status \"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2\": rpc error: code = NotFound desc = could not find container \"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2\": container with ID starting with b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2 not found: ID does not exist" Nov 28 17:04:29 crc 
kubenswrapper[4884]: I1128 17:04:29.349900 4884 scope.go:117] "RemoveContainer" containerID="d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.350174 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2"} err="failed to get container status \"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2\": rpc error: code = NotFound desc = could not find container \"d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2\": container with ID starting with d82450888427e73e6b265f662fe21bbaf5eb00a1b8bcdb48a85e42328c6cf4b2 not found: ID does not exist" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.350202 4884 scope.go:117] "RemoveContainer" containerID="b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.350440 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2"} err="failed to get container status \"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2\": rpc error: code = NotFound desc = could not find container \"b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2\": container with ID starting with b1998f7a74c4e8cab1a60bc18909ae83d7b4b3b16e2c396eb75468359d39a0b2 not found: ID does not exist" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.643059 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-868cd77ff5-7g99m" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.747000 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data\") pod \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\" (UID: \"6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757\") " Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.748168 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data" (OuterVolumeSpecName: "config-data") pod "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" (UID: "6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:04:29 crc kubenswrapper[4884]: I1128 17:04:29.748569 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:04:30 crc kubenswrapper[4884]: I1128 17:04:30.053274 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fff568957-wdllx"] Nov 28 17:04:30 crc kubenswrapper[4884]: I1128 17:04:30.071851 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fff568957-wdllx"] Nov 28 17:04:30 crc kubenswrapper[4884]: I1128 17:04:30.708172 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" path="/var/lib/kubelet/pods/6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757/volumes" Nov 28 17:04:31 crc kubenswrapper[4884]: I1128 17:04:31.805445 4884 scope.go:117] "RemoveContainer" containerID="1faa3dd20cbb4aae92601a6b8def3e56d961b6effe5c03a69d7229028652f330" Nov 28 17:04:31 crc kubenswrapper[4884]: I1128 17:04:31.857184 4884 scope.go:117] "RemoveContainer" containerID="a6bffeec389d2ea92a7193f096fae0a78b827688a618c99929eefae16530db0c" Nov 28 17:04:31 crc kubenswrapper[4884]: I1128 17:04:31.911556 4884 scope.go:117] "RemoveContainer" containerID="620df8a6c8c494af7dee62c7ec34ca7dac620f91597094c24093d136fea78276" Nov 28 17:04:39 crc kubenswrapper[4884]: I1128 17:04:39.644164 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-868cd77ff5-7g99m" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:04:45 crc kubenswrapper[4884]: I1128 17:04:45.042187 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-wjsqr"] Nov 28 17:04:45 crc kubenswrapper[4884]: I1128 17:04:45.053606 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-wjsqr"] Nov 28 17:04:46 crc kubenswrapper[4884]: I1128 17:04:46.706764 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a118041-0fdb-4d62-b06c-88ae23dde1c9" path="/var/lib/kubelet/pods/5a118041-0fdb-4d62-b06c-88ae23dde1c9/volumes" Nov 28 17:04:49 crc kubenswrapper[4884]: I1128 17:04:49.643527 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-868cd77ff5-7g99m" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 17:04:49 crc kubenswrapper[4884]: I1128 17:04:49.644328 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-868cd77ff5-7g99m" Nov 28 17:04:54 crc kubenswrapper[4884]: I1128 17:04:54.031875 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-03c4-account-create-bn9g7"] Nov 28 17:04:54 crc kubenswrapper[4884]: I1128 17:04:54.041543 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-03c4-account-create-bn9g7"] Nov 28 17:04:54 crc kubenswrapper[4884]: I1128 17:04:54.398213 4884 generic.go:334] "Generic (PLEG): container finished" podID="632de331-6d6e-4351-83d1-1e722a8a634e" containerID="4347858c9b50b3471f23cfdeb5098a9b0f4d7f1ed0cbeae7b320c4454b825ffa" exitCode=137 
Nov 28 17:04:54 crc kubenswrapper[4884]: I1128 17:04:54.398253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-868cd77ff5-7g99m" event={"ID":"632de331-6d6e-4351-83d1-1e722a8a634e","Type":"ContainerDied","Data":"4347858c9b50b3471f23cfdeb5098a9b0f4d7f1ed0cbeae7b320c4454b825ffa"}
Nov 28 17:04:54 crc kubenswrapper[4884]: I1128 17:04:54.701150 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b58ca55-9063-4455-a802-634f36055904" path="/var/lib/kubelet/pods/2b58ca55-9063-4455-a802-634f36055904/volumes"
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.083250 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-868cd77ff5-7g99m"
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.161695 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-config-data\") pod \"632de331-6d6e-4351-83d1-1e722a8a634e\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") "
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.162046 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xqtg\" (UniqueName: \"kubernetes.io/projected/632de331-6d6e-4351-83d1-1e722a8a634e-kube-api-access-2xqtg\") pod \"632de331-6d6e-4351-83d1-1e722a8a634e\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") "
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.162315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-scripts\") pod \"632de331-6d6e-4351-83d1-1e722a8a634e\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") "
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.162430 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/632de331-6d6e-4351-83d1-1e722a8a634e-logs\") pod \"632de331-6d6e-4351-83d1-1e722a8a634e\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") "
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.162519 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/632de331-6d6e-4351-83d1-1e722a8a634e-horizon-secret-key\") pod \"632de331-6d6e-4351-83d1-1e722a8a634e\" (UID: \"632de331-6d6e-4351-83d1-1e722a8a634e\") "
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.162805 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/632de331-6d6e-4351-83d1-1e722a8a634e-logs" (OuterVolumeSpecName: "logs") pod "632de331-6d6e-4351-83d1-1e722a8a634e" (UID: "632de331-6d6e-4351-83d1-1e722a8a634e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.163074 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/632de331-6d6e-4351-83d1-1e722a8a634e-logs\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.167530 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/632de331-6d6e-4351-83d1-1e722a8a634e-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "632de331-6d6e-4351-83d1-1e722a8a634e" (UID: "632de331-6d6e-4351-83d1-1e722a8a634e"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.167712 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/632de331-6d6e-4351-83d1-1e722a8a634e-kube-api-access-2xqtg" (OuterVolumeSpecName: "kube-api-access-2xqtg") pod "632de331-6d6e-4351-83d1-1e722a8a634e" (UID: "632de331-6d6e-4351-83d1-1e722a8a634e"). InnerVolumeSpecName "kube-api-access-2xqtg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.186430 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-config-data" (OuterVolumeSpecName: "config-data") pod "632de331-6d6e-4351-83d1-1e722a8a634e" (UID: "632de331-6d6e-4351-83d1-1e722a8a634e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.186936 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-scripts" (OuterVolumeSpecName: "scripts") pod "632de331-6d6e-4351-83d1-1e722a8a634e" (UID: "632de331-6d6e-4351-83d1-1e722a8a634e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.264723 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.264755 4884 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/632de331-6d6e-4351-83d1-1e722a8a634e-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.264767 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/632de331-6d6e-4351-83d1-1e722a8a634e-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.264778 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xqtg\" (UniqueName: \"kubernetes.io/projected/632de331-6d6e-4351-83d1-1e722a8a634e-kube-api-access-2xqtg\") on node \"crc\" DevicePath \"\""
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.413902 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-868cd77ff5-7g99m" event={"ID":"632de331-6d6e-4351-83d1-1e722a8a634e","Type":"ContainerDied","Data":"a885c3bed3e0a2b54d6f05985aadf71b6080958c771040180b25c09381b62151"}
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.413960 4884 scope.go:117] "RemoveContainer" containerID="623589a4a8e74e6b5f88467f318f0f5b5f4f373dd6c11a40b7aad44cb4fd6178"
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.413961 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-868cd77ff5-7g99m"
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.455274 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-868cd77ff5-7g99m"]
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.464012 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-868cd77ff5-7g99m"]
Nov 28 17:04:55 crc kubenswrapper[4884]: I1128 17:04:55.608386 4884 scope.go:117] "RemoveContainer" containerID="4347858c9b50b3471f23cfdeb5098a9b0f4d7f1ed0cbeae7b320c4454b825ffa"
Nov 28 17:04:56 crc kubenswrapper[4884]: I1128 17:04:56.702667 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" path="/var/lib/kubelet/pods/632de331-6d6e-4351-83d1-1e722a8a634e/volumes"
Nov 28 17:05:01 crc kubenswrapper[4884]: I1128 17:05:01.041192 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-2bl49"]
Nov 28 17:05:01 crc kubenswrapper[4884]: I1128 17:05:01.049567 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-2bl49"]
Nov 28 17:05:02 crc kubenswrapper[4884]: I1128 17:05:02.709048 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="829cdd1b-9af4-4f6f-bf15-75b8208249e6" path="/var/lib/kubelet/pods/829cdd1b-9af4-4f6f-bf15-75b8208249e6/volumes"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.447702 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6dc467574f-qbf7p"]
Nov 28 17:05:06 crc kubenswrapper[4884]: E1128 17:05:06.449487 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon-log"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449509 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon-log"
Nov 28 17:05:06 crc kubenswrapper[4884]: E1128 17:05:06.449528 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449537 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon"
Nov 28 17:05:06 crc kubenswrapper[4884]: E1128 17:05:06.449556 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449562 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon"
Nov 28 17:05:06 crc kubenswrapper[4884]: E1128 17:05:06.449587 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon-log"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449596 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon-log"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449853 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449871 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="632de331-6d6e-4351-83d1-1e722a8a634e" containerName="horizon-log"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449891 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon-log"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.449905 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a6d7a15-1e75-4bd9-9fe4-0d7c54d2b757" containerName="horizon"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.451705 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.469610 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dc467574f-qbf7p"]
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.604078 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-horizon-secret-key\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.604502 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-logs\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.604601 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-scripts\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.604636 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsh47\" (UniqueName: \"kubernetes.io/projected/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-kube-api-access-qsh47\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.604678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-config-data\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.707477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-config-data\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.707677 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-horizon-secret-key\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.707753 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-logs\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p"
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-logs\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.707873 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-scripts\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.707921 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsh47\" (UniqueName: \"kubernetes.io/projected/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-kube-api-access-qsh47\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.708351 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-logs\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.708989 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-scripts\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.709494 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-config-data\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.723539 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-horizon-secret-key\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.755440 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsh47\" (UniqueName: \"kubernetes.io/projected/26e3dc83-2fac-49a2-84c9-cd2b20139b3d-kube-api-access-qsh47\") pod \"horizon-6dc467574f-qbf7p\" (UID: \"26e3dc83-2fac-49a2-84c9-cd2b20139b3d\") " pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:06 crc kubenswrapper[4884]: I1128 17:05:06.778693 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.337571 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6dc467574f-qbf7p"] Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.554240 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dc467574f-qbf7p" event={"ID":"26e3dc83-2fac-49a2-84c9-cd2b20139b3d","Type":"ContainerStarted","Data":"ef42806a3d4fe3f6b2b610ed7dae09c3dd93326544b02f3f15ff91ac0a299480"} Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.554543 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dc467574f-qbf7p" event={"ID":"26e3dc83-2fac-49a2-84c9-cd2b20139b3d","Type":"ContainerStarted","Data":"9e23ca70ee39ae9025e02d8ca8a73fd2669f09350cf296534afe2957dc40bb53"} Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.764161 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-b6lrp"] Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.766711 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.778649 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-b6lrp"] Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.833522 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx5j5\" (UniqueName: \"kubernetes.io/projected/24634208-add4-43b8-84fe-ab8e71bd1135-kube-api-access-rx5j5\") pod \"heat-db-create-b6lrp\" (UID: \"24634208-add4-43b8-84fe-ab8e71bd1135\") " pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.935919 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx5j5\" (UniqueName: \"kubernetes.io/projected/24634208-add4-43b8-84fe-ab8e71bd1135-kube-api-access-rx5j5\") pod \"heat-db-create-b6lrp\" (UID: \"24634208-add4-43b8-84fe-ab8e71bd1135\") " pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:07 crc kubenswrapper[4884]: I1128 17:05:07.954446 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx5j5\" (UniqueName: \"kubernetes.io/projected/24634208-add4-43b8-84fe-ab8e71bd1135-kube-api-access-rx5j5\") pod \"heat-db-create-b6lrp\" (UID: \"24634208-add4-43b8-84fe-ab8e71bd1135\") " pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:08 crc kubenswrapper[4884]: I1128 17:05:08.102149 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:08 crc kubenswrapper[4884]: I1128 17:05:08.563985 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6dc467574f-qbf7p" event={"ID":"26e3dc83-2fac-49a2-84c9-cd2b20139b3d","Type":"ContainerStarted","Data":"efdde050ebcad514d18d70645936ada99909e7b510c5873805be85ed2fe6e1e4"} Nov 28 17:05:08 crc kubenswrapper[4884]: I1128 17:05:08.579744 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-b6lrp"] Nov 28 17:05:08 crc kubenswrapper[4884]: I1128 17:05:08.596265 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6dc467574f-qbf7p" podStartSLOduration=2.596241922 podStartE2EDuration="2.596241922s" podCreationTimestamp="2025-11-28 17:05:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:05:08.589751503 +0000 UTC m=+6348.152535304" watchObservedRunningTime="2025-11-28 17:05:08.596241922 +0000 UTC m=+6348.159025723" Nov 28 17:05:09 crc kubenswrapper[4884]: I1128 17:05:09.576216 4884 generic.go:334] "Generic (PLEG): container finished" podID="24634208-add4-43b8-84fe-ab8e71bd1135" containerID="8e31b905d097fbc6fb20c7de704b88149d5d54a75cf25e27a6b53a6c41b294aa" exitCode=0 Nov 28 17:05:09 crc kubenswrapper[4884]: I1128 17:05:09.576286 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-b6lrp" event={"ID":"24634208-add4-43b8-84fe-ab8e71bd1135","Type":"ContainerDied","Data":"8e31b905d097fbc6fb20c7de704b88149d5d54a75cf25e27a6b53a6c41b294aa"} Nov 28 17:05:09 crc kubenswrapper[4884]: I1128 17:05:09.576622 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-b6lrp" event={"ID":"24634208-add4-43b8-84fe-ab8e71bd1135","Type":"ContainerStarted","Data":"277ed9a6b5138db97c54399c4186c144f6f62e027b1bf22c3c5d2fed95a57ca2"} Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.001820 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.113642 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rx5j5\" (UniqueName: \"kubernetes.io/projected/24634208-add4-43b8-84fe-ab8e71bd1135-kube-api-access-rx5j5\") pod \"24634208-add4-43b8-84fe-ab8e71bd1135\" (UID: \"24634208-add4-43b8-84fe-ab8e71bd1135\") " Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.122609 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24634208-add4-43b8-84fe-ab8e71bd1135-kube-api-access-rx5j5" (OuterVolumeSpecName: "kube-api-access-rx5j5") pod "24634208-add4-43b8-84fe-ab8e71bd1135" (UID: "24634208-add4-43b8-84fe-ab8e71bd1135"). InnerVolumeSpecName "kube-api-access-rx5j5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.216612 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rx5j5\" (UniqueName: \"kubernetes.io/projected/24634208-add4-43b8-84fe-ab8e71bd1135-kube-api-access-rx5j5\") on node \"crc\" DevicePath \"\"" Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.594191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-b6lrp" event={"ID":"24634208-add4-43b8-84fe-ab8e71bd1135","Type":"ContainerDied","Data":"277ed9a6b5138db97c54399c4186c144f6f62e027b1bf22c3c5d2fed95a57ca2"} Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.594241 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="277ed9a6b5138db97c54399c4186c144f6f62e027b1bf22c3c5d2fed95a57ca2" Nov 28 17:05:11 crc kubenswrapper[4884]: I1128 17:05:11.594313 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-b6lrp" Nov 28 17:05:16 crc kubenswrapper[4884]: I1128 17:05:16.779556 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:16 crc kubenswrapper[4884]: I1128 17:05:16.781561 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.865221 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-a703-account-create-pzpv6"] Nov 28 17:05:17 crc kubenswrapper[4884]: E1128 17:05:17.866168 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24634208-add4-43b8-84fe-ab8e71bd1135" containerName="mariadb-database-create" Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.866188 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="24634208-add4-43b8-84fe-ab8e71bd1135" containerName="mariadb-database-create" Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.866517 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="24634208-add4-43b8-84fe-ab8e71bd1135" containerName="mariadb-database-create" Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.867640 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.874219 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.893665 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-a703-account-create-pzpv6"] Nov 28 17:05:17 crc kubenswrapper[4884]: I1128 17:05:17.954580 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9gbw\" (UniqueName: \"kubernetes.io/projected/ce397053-6892-4bd6-a62a-788e4b2ab31f-kube-api-access-g9gbw\") pod \"heat-a703-account-create-pzpv6\" (UID: \"ce397053-6892-4bd6-a62a-788e4b2ab31f\") " pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:18 crc kubenswrapper[4884]: I1128 17:05:18.057243 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9gbw\" (UniqueName: \"kubernetes.io/projected/ce397053-6892-4bd6-a62a-788e4b2ab31f-kube-api-access-g9gbw\") pod \"heat-a703-account-create-pzpv6\" (UID: \"ce397053-6892-4bd6-a62a-788e4b2ab31f\") " pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:18 crc kubenswrapper[4884]: I1128 17:05:18.095599 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9gbw\" (UniqueName: \"kubernetes.io/projected/ce397053-6892-4bd6-a62a-788e4b2ab31f-kube-api-access-g9gbw\") pod \"heat-a703-account-create-pzpv6\" (UID: \"ce397053-6892-4bd6-a62a-788e4b2ab31f\") " pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:18 crc kubenswrapper[4884]: I1128 17:05:18.209116 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:18 crc kubenswrapper[4884]: I1128 17:05:18.685555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-a703-account-create-pzpv6" event={"ID":"ce397053-6892-4bd6-a62a-788e4b2ab31f","Type":"ContainerStarted","Data":"cc97dcec80e0e1b5734084743b9282c6aa1f5771602841223546b3f14f9c5f8e"} Nov 28 17:05:18 crc kubenswrapper[4884]: I1128 17:05:18.701307 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-a703-account-create-pzpv6"] Nov 28 17:05:19 crc kubenswrapper[4884]: I1128 17:05:19.698997 4884 generic.go:334] "Generic (PLEG): container finished" podID="ce397053-6892-4bd6-a62a-788e4b2ab31f" containerID="b931350cdf99a76d38af2259c0e3e51af8df470c4598855d730b8212d1d7e834" exitCode=0 Nov 28 17:05:19 crc kubenswrapper[4884]: I1128 17:05:19.699049 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-a703-account-create-pzpv6" event={"ID":"ce397053-6892-4bd6-a62a-788e4b2ab31f","Type":"ContainerDied","Data":"b931350cdf99a76d38af2259c0e3e51af8df470c4598855d730b8212d1d7e834"} Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.165158 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.341881 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9gbw\" (UniqueName: \"kubernetes.io/projected/ce397053-6892-4bd6-a62a-788e4b2ab31f-kube-api-access-g9gbw\") pod \"ce397053-6892-4bd6-a62a-788e4b2ab31f\" (UID: \"ce397053-6892-4bd6-a62a-788e4b2ab31f\") " Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.349538 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce397053-6892-4bd6-a62a-788e4b2ab31f-kube-api-access-g9gbw" (OuterVolumeSpecName: "kube-api-access-g9gbw") pod "ce397053-6892-4bd6-a62a-788e4b2ab31f" (UID: "ce397053-6892-4bd6-a62a-788e4b2ab31f"). InnerVolumeSpecName "kube-api-access-g9gbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.449944 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9gbw\" (UniqueName: \"kubernetes.io/projected/ce397053-6892-4bd6-a62a-788e4b2ab31f-kube-api-access-g9gbw\") on node \"crc\" DevicePath \"\"" Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.717958 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-a703-account-create-pzpv6" event={"ID":"ce397053-6892-4bd6-a62a-788e4b2ab31f","Type":"ContainerDied","Data":"cc97dcec80e0e1b5734084743b9282c6aa1f5771602841223546b3f14f9c5f8e"} Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.718183 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc97dcec80e0e1b5734084743b9282c6aa1f5771602841223546b3f14f9c5f8e" Nov 28 17:05:21 crc kubenswrapper[4884]: I1128 17:05:21.718044 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-a703-account-create-pzpv6" Nov 28 17:05:22 crc kubenswrapper[4884]: I1128 17:05:22.987767 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-q6pfx"] Nov 28 17:05:22 crc kubenswrapper[4884]: E1128 17:05:22.989334 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce397053-6892-4bd6-a62a-788e4b2ab31f" containerName="mariadb-account-create" Nov 28 17:05:22 crc kubenswrapper[4884]: I1128 17:05:22.989408 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce397053-6892-4bd6-a62a-788e4b2ab31f" containerName="mariadb-account-create" Nov 28 17:05:22 crc kubenswrapper[4884]: I1128 17:05:22.989667 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce397053-6892-4bd6-a62a-788e4b2ab31f" containerName="mariadb-account-create" Nov 28 17:05:22 crc kubenswrapper[4884]: I1128 17:05:22.990411 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:22 crc kubenswrapper[4884]: I1128 17:05:22.993147 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-8jwdk" Nov 28 17:05:22 crc kubenswrapper[4884]: I1128 17:05:22.993581 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.014276 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-q6pfx"] Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.092398 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzxk8\" (UniqueName: \"kubernetes.io/projected/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-kube-api-access-fzxk8\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.092450 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-config-data\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.092926 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-combined-ca-bundle\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.195765 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-combined-ca-bundle\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.195918 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzxk8\" (UniqueName: \"kubernetes.io/projected/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-kube-api-access-fzxk8\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.195955 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-config-data\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.202572 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-combined-ca-bundle\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.217724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzxk8\" (UniqueName: \"kubernetes.io/projected/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-kube-api-access-fzxk8\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " 
pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.219154 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-config-data\") pod \"heat-db-sync-q6pfx\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") " pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.314222 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-q6pfx" Nov 28 17:05:23 crc kubenswrapper[4884]: I1128 17:05:23.810656 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-q6pfx"] Nov 28 17:05:23 crc kubenswrapper[4884]: W1128 17:05:23.825442 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c3c3d57_bd5f_4c24_bfa2_d5231488b40b.slice/crio-79853492e1e1965d515f445e3f56b6d32a9ddd78458bea488092aad22dd40a1b WatchSource:0}: Error finding container 79853492e1e1965d515f445e3f56b6d32a9ddd78458bea488092aad22dd40a1b: Status 404 returned error can't find the container with id 79853492e1e1965d515f445e3f56b6d32a9ddd78458bea488092aad22dd40a1b Nov 28 17:05:24 crc kubenswrapper[4884]: I1128 17:05:24.886017 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-q6pfx" event={"ID":"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b","Type":"ContainerStarted","Data":"79853492e1e1965d515f445e3f56b6d32a9ddd78458bea488092aad22dd40a1b"} Nov 28 17:05:28 crc kubenswrapper[4884]: I1128 17:05:28.708971 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:31 crc kubenswrapper[4884]: I1128 17:05:31.122638 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6dc467574f-qbf7p" Nov 28 17:05:31 crc kubenswrapper[4884]: I1128 17:05:31.206248 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-86747bdd49-2sdfl"] Nov 28 17:05:31 crc kubenswrapper[4884]: I1128 17:05:31.206735 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-86747bdd49-2sdfl" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon-log" containerID="cri-o://36ef2eec058c3307eb5a977993715b1e71a570be74ba2b41ce5046aa63826303" gracePeriod=30 Nov 28 17:05:31 crc kubenswrapper[4884]: I1128 17:05:31.206836 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-86747bdd49-2sdfl" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" containerID="cri-o://a6257017442437d8db310e74ed1dbb7e46269f5be18815edf3bfcb49aef095e2" gracePeriod=30 Nov 28 17:05:31 crc kubenswrapper[4884]: I1128 17:05:31.955982 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-q6pfx" event={"ID":"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b","Type":"ContainerStarted","Data":"6e2134556d2cf0adb11acb96df0d5ebe501d38000ff85cdead201b19ab34f321"} Nov 28 17:05:31 crc kubenswrapper[4884]: I1128 17:05:31.988873 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-q6pfx" podStartSLOduration=2.671055819 podStartE2EDuration="9.988515385s" podCreationTimestamp="2025-11-28 17:05:22 +0000 UTC" firstStartedPulling="2025-11-28 17:05:23.827953712 +0000 UTC m=+6363.390737513" lastFinishedPulling="2025-11-28 17:05:31.145413278 +0000 UTC m=+6370.708197079" 
Nov 28 17:05:32 crc kubenswrapper[4884]: I1128 17:05:32.076232 4884 scope.go:117] "RemoveContainer" containerID="88463fc76124fb44e166f3ca7a089dced44e596f98e9bbe7b305c4c7433677b0"
Nov 28 17:05:32 crc kubenswrapper[4884]: I1128 17:05:32.104785 4884 scope.go:117] "RemoveContainer" containerID="b24658c84ba428d1972ead9ec937d2cf25ebf0e12939f0ecfad11f82bc3dc6fb"
Nov 28 17:05:32 crc kubenswrapper[4884]: I1128 17:05:32.136769 4884 scope.go:117] "RemoveContainer" containerID="85a491cbcb1c71b0f2815f15062500a70ce51e676422cd6060b28bacaad6b45b"
Nov 28 17:05:33 crc kubenswrapper[4884]: I1128 17:05:33.981148 4884 generic.go:334] "Generic (PLEG): container finished" podID="6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" containerID="6e2134556d2cf0adb11acb96df0d5ebe501d38000ff85cdead201b19ab34f321" exitCode=0
Nov 28 17:05:33 crc kubenswrapper[4884]: I1128 17:05:33.981253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-q6pfx" event={"ID":"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b","Type":"ContainerDied","Data":"6e2134556d2cf0adb11acb96df0d5ebe501d38000ff85cdead201b19ab34f321"}
Nov 28 17:05:34 crc kubenswrapper[4884]: I1128 17:05:34.991880 4884 generic.go:334] "Generic (PLEG): container finished" podID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerID="a6257017442437d8db310e74ed1dbb7e46269f5be18815edf3bfcb49aef095e2" exitCode=0
Nov 28 17:05:34 crc kubenswrapper[4884]: I1128 17:05:34.991966 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86747bdd49-2sdfl" event={"ID":"f81387d2-8047-467c-b9e2-28ceaeb0b4f9","Type":"ContainerDied","Data":"a6257017442437d8db310e74ed1dbb7e46269f5be18815edf3bfcb49aef095e2"}
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.373008 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-q6pfx"
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.478124 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-config-data\") pod \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") "
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.478213 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-combined-ca-bundle\") pod \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") "
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.478445 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzxk8\" (UniqueName: \"kubernetes.io/projected/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-kube-api-access-fzxk8\") pod \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\" (UID: \"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b\") "
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.487402 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-kube-api-access-fzxk8" (OuterVolumeSpecName: "kube-api-access-fzxk8") pod "6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" (UID: "6c3c3d57-bd5f-4c24-bfa2-d5231488b40b"). InnerVolumeSpecName "kube-api-access-fzxk8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.512763 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" (UID: "6c3c3d57-bd5f-4c24-bfa2-d5231488b40b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.565319 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-config-data" (OuterVolumeSpecName: "config-data") pod "6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" (UID: "6c3c3d57-bd5f-4c24-bfa2-d5231488b40b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.581732 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzxk8\" (UniqueName: \"kubernetes.io/projected/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-kube-api-access-fzxk8\") on node \"crc\" DevicePath \"\""
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.581786 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:05:35 crc kubenswrapper[4884]: I1128 17:05:35.581804 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:05:36 crc kubenswrapper[4884]: I1128 17:05:36.005425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-q6pfx" event={"ID":"6c3c3d57-bd5f-4c24-bfa2-d5231488b40b","Type":"ContainerDied","Data":"79853492e1e1965d515f445e3f56b6d32a9ddd78458bea488092aad22dd40a1b"}
Nov 28 17:05:36 crc kubenswrapper[4884]: I1128 17:05:36.005487 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-q6pfx"
Nov 28 17:05:36 crc kubenswrapper[4884]: I1128 17:05:36.005495 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79853492e1e1965d515f445e3f56b6d32a9ddd78458bea488092aad22dd40a1b"
Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.035615 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-c8f756f68-c4d7x"]
Nov 28 17:05:37 crc kubenswrapper[4884]: E1128 17:05:37.036016 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" containerName="heat-db-sync"
Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.036029 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" containerName="heat-db-sync"
Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.036276 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" containerName="heat-db-sync"
Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.036957 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-c8f756f68-c4d7x"
Need to start a new one" pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.039419 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-8jwdk" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.039713 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.040378 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.063852 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-c8f756f68-c4d7x"] Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.112502 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-config-data-custom\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.112584 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt6k8\" (UniqueName: \"kubernetes.io/projected/e275aeed-0618-42c8-8be3-61142cc18046-kube-api-access-qt6k8\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.112662 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-config-data\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.112686 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-combined-ca-bundle\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.214439 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-config-data-custom\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.214494 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt6k8\" (UniqueName: \"kubernetes.io/projected/e275aeed-0618-42c8-8be3-61142cc18046-kube-api-access-qt6k8\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.214552 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-config-data\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 
17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.214569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-combined-ca-bundle\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.221755 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-config-data-custom\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.223229 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-config-data\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.237693 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e275aeed-0618-42c8-8be3-61142cc18046-combined-ca-bundle\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.248339 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt6k8\" (UniqueName: \"kubernetes.io/projected/e275aeed-0618-42c8-8be3-61142cc18046-kube-api-access-qt6k8\") pod \"heat-engine-c8f756f68-c4d7x\" (UID: \"e275aeed-0618-42c8-8be3-61142cc18046\") " pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.252709 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-698bf6cff5-9rpkn"] Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.254060 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.268446 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.321169 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-698bf6cff5-9rpkn"] Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.356772 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.410249 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5989c7c6f8-rhlzw"] Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.411648 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.417804 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.418525 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-config-data-custom\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.418638 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-config-data\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.418691 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwtqp\" (UniqueName: \"kubernetes.io/projected/cff6e56f-02dd-4083-9493-0fca54d1ca6b-kube-api-access-lwtqp\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.418759 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-combined-ca-bundle\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.424741 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5989c7c6f8-rhlzw"] Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.522844 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-config-data-custom\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.522966 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-config-data\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.523008 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-config-data\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.523046 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwtqp\" (UniqueName: \"kubernetes.io/projected/cff6e56f-02dd-4083-9493-0fca54d1ca6b-kube-api-access-lwtqp\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " 
pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.523075 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-combined-ca-bundle\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.523118 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-combined-ca-bundle\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.523136 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-config-data-custom\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.523175 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsgjn\" (UniqueName: \"kubernetes.io/projected/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-kube-api-access-dsgjn\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.556562 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-combined-ca-bundle\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.559893 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-config-data-custom\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.562482 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cff6e56f-02dd-4083-9493-0fca54d1ca6b-config-data\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.567892 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwtqp\" (UniqueName: \"kubernetes.io/projected/cff6e56f-02dd-4083-9493-0fca54d1ca6b-kube-api-access-lwtqp\") pod \"heat-api-698bf6cff5-9rpkn\" (UID: \"cff6e56f-02dd-4083-9493-0fca54d1ca6b\") " pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.626326 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-config-data-custom\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " 
pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.626382 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsgjn\" (UniqueName: \"kubernetes.io/projected/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-kube-api-access-dsgjn\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.626512 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-config-data\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.626560 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-combined-ca-bundle\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.630309 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-combined-ca-bundle\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.633980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-config-data-custom\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.639428 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-config-data\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.648345 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsgjn\" (UniqueName: \"kubernetes.io/projected/3b136c41-55db-4e2f-ba80-1e3e80ff5d64-kube-api-access-dsgjn\") pod \"heat-cfnapi-5989c7c6f8-rhlzw\" (UID: \"3b136c41-55db-4e2f-ba80-1e3e80ff5d64\") " pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.667467 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:37 crc kubenswrapper[4884]: I1128 17:05:37.766627 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:38 crc kubenswrapper[4884]: I1128 17:05:38.212326 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-c8f756f68-c4d7x"] Nov 28 17:05:38 crc kubenswrapper[4884]: I1128 17:05:38.327845 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-698bf6cff5-9rpkn"] Nov 28 17:05:38 crc kubenswrapper[4884]: W1128 17:05:38.336347 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcff6e56f_02dd_4083_9493_0fca54d1ca6b.slice/crio-576f305e09e896ab0134a393e93c7c1b081e81311200e0a5ab62d97243144072 WatchSource:0}: Error finding container 576f305e09e896ab0134a393e93c7c1b081e81311200e0a5ab62d97243144072: Status 404 returned error can't find the container with id 576f305e09e896ab0134a393e93c7c1b081e81311200e0a5ab62d97243144072 Nov 28 17:05:38 crc kubenswrapper[4884]: W1128 17:05:38.423769 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3b136c41_55db_4e2f_ba80_1e3e80ff5d64.slice/crio-4c8ee536224df9bd8c65720b567e3a0baf900328ef066e8ec133228a46eeec09 WatchSource:0}: Error finding container 4c8ee536224df9bd8c65720b567e3a0baf900328ef066e8ec133228a46eeec09: Status 404 returned error can't find the container with id 4c8ee536224df9bd8c65720b567e3a0baf900328ef066e8ec133228a46eeec09 Nov 28 17:05:38 crc kubenswrapper[4884]: I1128 17:05:38.426240 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5989c7c6f8-rhlzw"] Nov 28 17:05:39 crc kubenswrapper[4884]: I1128 17:05:39.040499 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c8f756f68-c4d7x" event={"ID":"e275aeed-0618-42c8-8be3-61142cc18046","Type":"ContainerStarted","Data":"94c332385aa7c5f779f9ab1554e6d7442bb78c2f0bed31b3e4c9d9db8241cde5"} Nov 28 17:05:39 crc kubenswrapper[4884]: I1128 17:05:39.040823 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-c8f756f68-c4d7x" event={"ID":"e275aeed-0618-42c8-8be3-61142cc18046","Type":"ContainerStarted","Data":"88be6a1981b7cf6b2df2a29474ec280ccbe413a19cc362bbeffbf80145745b25"} Nov 28 17:05:39 crc kubenswrapper[4884]: I1128 17:05:39.041970 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:39 crc kubenswrapper[4884]: I1128 17:05:39.043010 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-698bf6cff5-9rpkn" event={"ID":"cff6e56f-02dd-4083-9493-0fca54d1ca6b","Type":"ContainerStarted","Data":"576f305e09e896ab0134a393e93c7c1b081e81311200e0a5ab62d97243144072"} Nov 28 17:05:39 crc kubenswrapper[4884]: I1128 17:05:39.056013 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" event={"ID":"3b136c41-55db-4e2f-ba80-1e3e80ff5d64","Type":"ContainerStarted","Data":"4c8ee536224df9bd8c65720b567e3a0baf900328ef066e8ec133228a46eeec09"} Nov 28 17:05:39 crc kubenswrapper[4884]: I1128 17:05:39.060157 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-c8f756f68-c4d7x" podStartSLOduration=2.060139824 podStartE2EDuration="2.060139824s" podCreationTimestamp="2025-11-28 17:05:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:05:39.057785376 +0000 UTC m=+6378.620569187" 
watchObservedRunningTime="2025-11-28 17:05:39.060139824 +0000 UTC m=+6378.622923625" Nov 28 17:05:40 crc kubenswrapper[4884]: I1128 17:05:40.228872 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-86747bdd49-2sdfl" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.113:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.113:8080: connect: connection refused" Nov 28 17:05:41 crc kubenswrapper[4884]: I1128 17:05:41.087877 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-698bf6cff5-9rpkn" event={"ID":"cff6e56f-02dd-4083-9493-0fca54d1ca6b","Type":"ContainerStarted","Data":"c68eb0ae956788a2a22b7105ccbc248e868b54628860b289b2d1fb049e0456b4"} Nov 28 17:05:41 crc kubenswrapper[4884]: I1128 17:05:41.088423 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:41 crc kubenswrapper[4884]: I1128 17:05:41.090607 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" event={"ID":"3b136c41-55db-4e2f-ba80-1e3e80ff5d64","Type":"ContainerStarted","Data":"ea33577ad090ac6a3539c6c352212ba01421bce8c97e070b7f0f2860998cf83a"} Nov 28 17:05:41 crc kubenswrapper[4884]: I1128 17:05:41.090650 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:41 crc kubenswrapper[4884]: I1128 17:05:41.114262 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-698bf6cff5-9rpkn" podStartSLOduration=1.980732081 podStartE2EDuration="4.114239268s" podCreationTimestamp="2025-11-28 17:05:37 +0000 UTC" firstStartedPulling="2025-11-28 17:05:38.341703333 +0000 UTC m=+6377.904487134" lastFinishedPulling="2025-11-28 17:05:40.47521052 +0000 UTC m=+6380.037994321" observedRunningTime="2025-11-28 17:05:41.109750058 +0000 UTC m=+6380.672533869" watchObservedRunningTime="2025-11-28 17:05:41.114239268 +0000 UTC m=+6380.677023069" Nov 28 17:05:41 crc kubenswrapper[4884]: I1128 17:05:41.131168 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" podStartSLOduration=2.084899592 podStartE2EDuration="4.131147074s" podCreationTimestamp="2025-11-28 17:05:37 +0000 UTC" firstStartedPulling="2025-11-28 17:05:38.426296923 +0000 UTC m=+6377.989080724" lastFinishedPulling="2025-11-28 17:05:40.472544415 +0000 UTC m=+6380.035328206" observedRunningTime="2025-11-28 17:05:41.130208251 +0000 UTC m=+6380.692992062" watchObservedRunningTime="2025-11-28 17:05:41.131147074 +0000 UTC m=+6380.693930875" Nov 28 17:05:49 crc kubenswrapper[4884]: I1128 17:05:49.141819 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-698bf6cff5-9rpkn" Nov 28 17:05:49 crc kubenswrapper[4884]: I1128 17:05:49.247402 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5989c7c6f8-rhlzw" Nov 28 17:05:50 crc kubenswrapper[4884]: I1128 17:05:50.229402 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-86747bdd49-2sdfl" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.113:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.113:8080: connect: connection refused" Nov 28 17:05:57 crc kubenswrapper[4884]: I1128 17:05:57.052577 4884 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/nova-cell0-db-create-zmdmx"] Nov 28 17:05:57 crc kubenswrapper[4884]: I1128 17:05:57.069145 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-hktq5"] Nov 28 17:05:57 crc kubenswrapper[4884]: I1128 17:05:57.080138 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-hktq5"] Nov 28 17:05:57 crc kubenswrapper[4884]: I1128 17:05:57.090818 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-zmdmx"] Nov 28 17:05:57 crc kubenswrapper[4884]: I1128 17:05:57.395789 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-c8f756f68-c4d7x" Nov 28 17:05:58 crc kubenswrapper[4884]: I1128 17:05:58.029704 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-fz74w"] Nov 28 17:05:58 crc kubenswrapper[4884]: I1128 17:05:58.038300 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-fz74w"] Nov 28 17:05:58 crc kubenswrapper[4884]: I1128 17:05:58.705154 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="027e80a9-c0fd-47ec-8614-4ed267b8db45" path="/var/lib/kubelet/pods/027e80a9-c0fd-47ec-8614-4ed267b8db45/volumes" Nov 28 17:05:58 crc kubenswrapper[4884]: I1128 17:05:58.705729 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9f6705f-b8b0-43f7-b5d7-274032c45b93" path="/var/lib/kubelet/pods/b9f6705f-b8b0-43f7-b5d7-274032c45b93/volumes" Nov 28 17:05:58 crc kubenswrapper[4884]: I1128 17:05:58.706260 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d" path="/var/lib/kubelet/pods/d8a0d755-8ab0-437f-a2c3-b1d7a1fb043d/volumes" Nov 28 17:06:00 crc kubenswrapper[4884]: I1128 17:06:00.229367 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-86747bdd49-2sdfl" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.113:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.113:8080: connect: connection refused" Nov 28 17:06:00 crc kubenswrapper[4884]: I1128 17:06:00.229755 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.286357 4884 generic.go:334] "Generic (PLEG): container finished" podID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerID="36ef2eec058c3307eb5a977993715b1e71a570be74ba2b41ce5046aa63826303" exitCode=137 Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.286435 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86747bdd49-2sdfl" event={"ID":"f81387d2-8047-467c-b9e2-28ceaeb0b4f9","Type":"ContainerDied","Data":"36ef2eec058c3307eb5a977993715b1e71a570be74ba2b41ce5046aa63826303"} Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.678815 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.833123 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-logs\") pod \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.833605 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-logs" (OuterVolumeSpecName: "logs") pod "f81387d2-8047-467c-b9e2-28ceaeb0b4f9" (UID: "f81387d2-8047-467c-b9e2-28ceaeb0b4f9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.833631 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-scripts\") pod \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.833677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6mjn\" (UniqueName: \"kubernetes.io/projected/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-kube-api-access-v6mjn\") pod \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.833699 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-config-data\") pod \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.833844 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-horizon-secret-key\") pod \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\" (UID: \"f81387d2-8047-467c-b9e2-28ceaeb0b4f9\") " Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.834373 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-logs\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.838632 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "f81387d2-8047-467c-b9e2-28ceaeb0b4f9" (UID: "f81387d2-8047-467c-b9e2-28ceaeb0b4f9"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.838935 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-kube-api-access-v6mjn" (OuterVolumeSpecName: "kube-api-access-v6mjn") pod "f81387d2-8047-467c-b9e2-28ceaeb0b4f9" (UID: "f81387d2-8047-467c-b9e2-28ceaeb0b4f9"). InnerVolumeSpecName "kube-api-access-v6mjn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.858968 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-config-data" (OuterVolumeSpecName: "config-data") pod "f81387d2-8047-467c-b9e2-28ceaeb0b4f9" (UID: "f81387d2-8047-467c-b9e2-28ceaeb0b4f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.877313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-scripts" (OuterVolumeSpecName: "scripts") pod "f81387d2-8047-467c-b9e2-28ceaeb0b4f9" (UID: "f81387d2-8047-467c-b9e2-28ceaeb0b4f9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.937226 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.937369 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6mjn\" (UniqueName: \"kubernetes.io/projected/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-kube-api-access-v6mjn\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.937428 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:01 crc kubenswrapper[4884]: I1128 17:06:01.937489 4884 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f81387d2-8047-467c-b9e2-28ceaeb0b4f9-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.297470 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86747bdd49-2sdfl" event={"ID":"f81387d2-8047-467c-b9e2-28ceaeb0b4f9","Type":"ContainerDied","Data":"34c18676bd868eb3c53edf5842af4bd12a0a3309ef4b4684d9d6d62d8deaf120"} Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.297525 4884 scope.go:117] "RemoveContainer" containerID="a6257017442437d8db310e74ed1dbb7e46269f5be18815edf3bfcb49aef095e2" Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.297662 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86747bdd49-2sdfl" Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.339655 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-86747bdd49-2sdfl"] Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.350946 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-86747bdd49-2sdfl"] Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.459829 4884 scope.go:117] "RemoveContainer" containerID="36ef2eec058c3307eb5a977993715b1e71a570be74ba2b41ce5046aa63826303" Nov 28 17:06:02 crc kubenswrapper[4884]: I1128 17:06:02.701468 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" path="/var/lib/kubelet/pods/f81387d2-8047-467c-b9e2-28ceaeb0b4f9/volumes" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.336850 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs"] Nov 28 17:06:06 crc kubenswrapper[4884]: E1128 17:06:06.337735 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.337753 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" Nov 28 17:06:06 crc kubenswrapper[4884]: E1128 17:06:06.337776 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon-log" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.337784 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon-log" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.338056 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.338075 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f81387d2-8047-467c-b9e2-28ceaeb0b4f9" containerName="horizon-log" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.339879 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.342283 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.349994 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs"] Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.417763 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wswdt\" (UniqueName: \"kubernetes.io/projected/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-kube-api-access-wswdt\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.418139 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.418248 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.519754 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.519833 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wswdt\" (UniqueName: \"kubernetes.io/projected/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-kube-api-access-wswdt\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.519951 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.520377 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.520446 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.545839 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wswdt\" (UniqueName: \"kubernetes.io/projected/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-kube-api-access-wswdt\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:06 crc kubenswrapper[4884]: I1128 17:06:06.661594 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:07 crc kubenswrapper[4884]: I1128 17:06:07.050499 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-5b90-account-create-fzm75"] Nov 28 17:06:07 crc kubenswrapper[4884]: I1128 17:06:07.060438 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2078-account-create-8xlqx"] Nov 28 17:06:07 crc kubenswrapper[4884]: I1128 17:06:07.069386 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-5b90-account-create-fzm75"] Nov 28 17:06:07 crc kubenswrapper[4884]: I1128 17:06:07.078209 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2078-account-create-8xlqx"] Nov 28 17:06:07 crc kubenswrapper[4884]: I1128 17:06:07.145876 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs"] Nov 28 17:06:07 crc kubenswrapper[4884]: I1128 17:06:07.353303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" event={"ID":"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd","Type":"ContainerStarted","Data":"225dbd1bc7ee2292b6351e082c785abe0d405f6c25d9c0d61af1a78a7f3f7875"} Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.039815 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-fb5d-account-create-plzj8"] Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.048402 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-fb5d-account-create-plzj8"] Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.367626 4884 generic.go:334] "Generic (PLEG): container finished" podID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerID="804496c87a502c40e6f13209a298ea70d4a70f22decb62a000ca8896356ee0a2" exitCode=0 Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.367706 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" 
event={"ID":"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd","Type":"ContainerDied","Data":"804496c87a502c40e6f13209a298ea70d4a70f22decb62a000ca8896356ee0a2"} Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.372082 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.704450 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e19289-bb36-40c5-abca-c9707c889094" path="/var/lib/kubelet/pods/15e19289-bb36-40c5-abca-c9707c889094/volumes" Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.704984 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b46b792-bae5-4ec1-b696-3219b34136f2" path="/var/lib/kubelet/pods/3b46b792-bae5-4ec1-b696-3219b34136f2/volumes" Nov 28 17:06:08 crc kubenswrapper[4884]: I1128 17:06:08.705779 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee34c725-d8dc-4f85-a494-7d35ddfb9b54" path="/var/lib/kubelet/pods/ee34c725-d8dc-4f85-a494-7d35ddfb9b54/volumes" Nov 28 17:06:10 crc kubenswrapper[4884]: I1128 17:06:10.393222 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" event={"ID":"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd","Type":"ContainerStarted","Data":"42a7c1bb41a1c11377823d06c1f164ecde6892bb23efd038184ddf2391b3068d"} Nov 28 17:06:11 crc kubenswrapper[4884]: I1128 17:06:11.408696 4884 generic.go:334] "Generic (PLEG): container finished" podID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerID="42a7c1bb41a1c11377823d06c1f164ecde6892bb23efd038184ddf2391b3068d" exitCode=0 Nov 28 17:06:11 crc kubenswrapper[4884]: I1128 17:06:11.409134 4884 generic.go:334] "Generic (PLEG): container finished" podID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerID="5e5b6f52d08bbaee654c9bbaa3b83e96184748ca74bfb430a5dd29783d869274" exitCode=0 Nov 28 17:06:11 crc kubenswrapper[4884]: I1128 17:06:11.409167 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" event={"ID":"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd","Type":"ContainerDied","Data":"42a7c1bb41a1c11377823d06c1f164ecde6892bb23efd038184ddf2391b3068d"} Nov 28 17:06:11 crc kubenswrapper[4884]: I1128 17:06:11.409223 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" event={"ID":"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd","Type":"ContainerDied","Data":"5e5b6f52d08bbaee654c9bbaa3b83e96184748ca74bfb430a5dd29783d869274"} Nov 28 17:06:12 crc kubenswrapper[4884]: I1128 17:06:12.924427 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.066814 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-util\") pod \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.066922 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wswdt\" (UniqueName: \"kubernetes.io/projected/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-kube-api-access-wswdt\") pod \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.066960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-bundle\") pod \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\" (UID: \"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd\") " Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.069142 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-bundle" (OuterVolumeSpecName: "bundle") pod "1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" (UID: "1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.074219 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-kube-api-access-wswdt" (OuterVolumeSpecName: "kube-api-access-wswdt") pod "1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" (UID: "1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd"). InnerVolumeSpecName "kube-api-access-wswdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.078258 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-util" (OuterVolumeSpecName: "util") pod "1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" (UID: "1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.169555 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-util\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.169808 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wswdt\" (UniqueName: \"kubernetes.io/projected/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-kube-api-access-wswdt\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.169849 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.435779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" event={"ID":"1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd","Type":"ContainerDied","Data":"225dbd1bc7ee2292b6351e082c785abe0d405f6c25d9c0d61af1a78a7f3f7875"} Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.436044 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="225dbd1bc7ee2292b6351e082c785abe0d405f6c25d9c0d61af1a78a7f3f7875" Nov 28 17:06:13 crc kubenswrapper[4884]: I1128 17:06:13.435871 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs" Nov 28 17:06:17 crc kubenswrapper[4884]: I1128 17:06:17.042769 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fvzhr"] Nov 28 17:06:17 crc kubenswrapper[4884]: I1128 17:06:17.053321 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fvzhr"] Nov 28 17:06:18 crc kubenswrapper[4884]: I1128 17:06:18.725351 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71c60737-59f8-414b-857d-bea11d085a23" path="/var/lib/kubelet/pods/71c60737-59f8-414b-857d-bea11d085a23/volumes" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.290547 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd"] Nov 28 17:06:24 crc kubenswrapper[4884]: E1128 17:06:24.292306 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerName="util" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.292330 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerName="util" Nov 28 17:06:24 crc kubenswrapper[4884]: E1128 17:06:24.292346 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerName="extract" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.292361 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerName="extract" Nov 28 17:06:24 crc kubenswrapper[4884]: E1128 17:06:24.292373 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerName="pull" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.292380 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" 
containerName="pull" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.299459 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd" containerName="extract" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.300745 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.306559 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-fmnqv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.306813 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.307357 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.311036 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.376793 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.378448 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.383683 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-jv8pw" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.384012 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.412227 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.430111 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.431483 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.471937 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.491739 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6773cf61-55c4-4432-bc98-92b878f74b05-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g\" (UID: \"6773cf61-55c4-4432-bc98-92b878f74b05\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.491843 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6773cf61-55c4-4432-bc98-92b878f74b05-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g\" (UID: \"6773cf61-55c4-4432-bc98-92b878f74b05\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.491914 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrh8c\" (UniqueName: \"kubernetes.io/projected/9f95eadf-8c98-4cbc-bc58-f0454043ee6b-kube-api-access-mrh8c\") pod \"obo-prometheus-operator-668cf9dfbb-9tckd\" (UID: \"9f95eadf-8c98-4cbc-bc58-f0454043ee6b\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.515469 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-tw7tv"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.516802 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.525123 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.525377 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-hwpkv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.537485 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-tw7tv"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.594849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/527a6443-b807-4583-b7f5-6307ba1cade7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k\" (UID: \"527a6443-b807-4583-b7f5-6307ba1cade7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.594928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6773cf61-55c4-4432-bc98-92b878f74b05-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g\" (UID: \"6773cf61-55c4-4432-bc98-92b878f74b05\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.595002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/527a6443-b807-4583-b7f5-6307ba1cade7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k\" (UID: \"527a6443-b807-4583-b7f5-6307ba1cade7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.595050 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6773cf61-55c4-4432-bc98-92b878f74b05-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g\" (UID: \"6773cf61-55c4-4432-bc98-92b878f74b05\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.595164 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrh8c\" (UniqueName: \"kubernetes.io/projected/9f95eadf-8c98-4cbc-bc58-f0454043ee6b-kube-api-access-mrh8c\") pod \"obo-prometheus-operator-668cf9dfbb-9tckd\" (UID: \"9f95eadf-8c98-4cbc-bc58-f0454043ee6b\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.602165 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6773cf61-55c4-4432-bc98-92b878f74b05-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g\" (UID: \"6773cf61-55c4-4432-bc98-92b878f74b05\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.607161 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operators/perses-operator-5446b9c989-vqxbd"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.608538 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.613706 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6773cf61-55c4-4432-bc98-92b878f74b05-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g\" (UID: \"6773cf61-55c4-4432-bc98-92b878f74b05\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.616249 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-425rj" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.643004 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrh8c\" (UniqueName: \"kubernetes.io/projected/9f95eadf-8c98-4cbc-bc58-f0454043ee6b-kube-api-access-mrh8c\") pod \"obo-prometheus-operator-668cf9dfbb-9tckd\" (UID: \"9f95eadf-8c98-4cbc-bc58-f0454043ee6b\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.646068 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-vqxbd"] Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.696950 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/527a6443-b807-4583-b7f5-6307ba1cade7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k\" (UID: \"527a6443-b807-4583-b7f5-6307ba1cade7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.697389 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/527a6443-b807-4583-b7f5-6307ba1cade7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k\" (UID: \"527a6443-b807-4583-b7f5-6307ba1cade7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.697519 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkhc7\" (UniqueName: \"kubernetes.io/projected/320957a8-3581-4e31-96e6-95f80a3cfcce-kube-api-access-tkhc7\") pod \"observability-operator-d8bb48f5d-tw7tv\" (UID: \"320957a8-3581-4e31-96e6-95f80a3cfcce\") " pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.697548 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/320957a8-3581-4e31-96e6-95f80a3cfcce-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-tw7tv\" (UID: \"320957a8-3581-4e31-96e6-95f80a3cfcce\") " pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.713976 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.720201 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/527a6443-b807-4583-b7f5-6307ba1cade7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k\" (UID: \"527a6443-b807-4583-b7f5-6307ba1cade7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.724597 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/527a6443-b807-4583-b7f5-6307ba1cade7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k\" (UID: \"527a6443-b807-4583-b7f5-6307ba1cade7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.752608 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.799612 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbdlg\" (UniqueName: \"kubernetes.io/projected/4d1cc7c0-4594-4443-80a8-237320e0138e-kube-api-access-vbdlg\") pod \"perses-operator-5446b9c989-vqxbd\" (UID: \"4d1cc7c0-4594-4443-80a8-237320e0138e\") " pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.799730 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkhc7\" (UniqueName: \"kubernetes.io/projected/320957a8-3581-4e31-96e6-95f80a3cfcce-kube-api-access-tkhc7\") pod \"observability-operator-d8bb48f5d-tw7tv\" (UID: \"320957a8-3581-4e31-96e6-95f80a3cfcce\") " pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.799762 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/320957a8-3581-4e31-96e6-95f80a3cfcce-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-tw7tv\" (UID: \"320957a8-3581-4e31-96e6-95f80a3cfcce\") " pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.799815 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/4d1cc7c0-4594-4443-80a8-237320e0138e-openshift-service-ca\") pod \"perses-operator-5446b9c989-vqxbd\" (UID: \"4d1cc7c0-4594-4443-80a8-237320e0138e\") " pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.840486 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/320957a8-3581-4e31-96e6-95f80a3cfcce-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-tw7tv\" (UID: \"320957a8-3581-4e31-96e6-95f80a3cfcce\") " pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.849933 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tkhc7\" (UniqueName: \"kubernetes.io/projected/320957a8-3581-4e31-96e6-95f80a3cfcce-kube-api-access-tkhc7\") pod \"observability-operator-d8bb48f5d-tw7tv\" (UID: \"320957a8-3581-4e31-96e6-95f80a3cfcce\") " pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.852954 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.909252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/4d1cc7c0-4594-4443-80a8-237320e0138e-openshift-service-ca\") pod \"perses-operator-5446b9c989-vqxbd\" (UID: \"4d1cc7c0-4594-4443-80a8-237320e0138e\") " pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.909387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbdlg\" (UniqueName: \"kubernetes.io/projected/4d1cc7c0-4594-4443-80a8-237320e0138e-kube-api-access-vbdlg\") pod \"perses-operator-5446b9c989-vqxbd\" (UID: \"4d1cc7c0-4594-4443-80a8-237320e0138e\") " pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.910660 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/4d1cc7c0-4594-4443-80a8-237320e0138e-openshift-service-ca\") pod \"perses-operator-5446b9c989-vqxbd\" (UID: \"4d1cc7c0-4594-4443-80a8-237320e0138e\") " pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.928112 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" Nov 28 17:06:24 crc kubenswrapper[4884]: I1128 17:06:24.937767 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbdlg\" (UniqueName: \"kubernetes.io/projected/4d1cc7c0-4594-4443-80a8-237320e0138e-kube-api-access-vbdlg\") pod \"perses-operator-5446b9c989-vqxbd\" (UID: \"4d1cc7c0-4594-4443-80a8-237320e0138e\") " pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:25 crc kubenswrapper[4884]: I1128 17:06:25.446770 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.081014 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g"] Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.100996 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd"] Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.260874 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k"] Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.288098 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-tw7tv"] Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.444813 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-vqxbd"] Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.596076 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" event={"ID":"527a6443-b807-4583-b7f5-6307ba1cade7","Type":"ContainerStarted","Data":"5c845242817bfbecf470f1a7fb1507467f186bfe46ba0c2eb4deede57bf52d26"} Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.599263 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" event={"ID":"320957a8-3581-4e31-96e6-95f80a3cfcce","Type":"ContainerStarted","Data":"b3a30c6776723257e6520bc4e8b560346e0d012ee47f61606fd118ade46fef05"} Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.600478 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" event={"ID":"6773cf61-55c4-4432-bc98-92b878f74b05","Type":"ContainerStarted","Data":"000bf89f3a1cc561d440536e4ece5b2317e3d790be98c40edb621ca4079f56fa"} Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.609307 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" event={"ID":"9f95eadf-8c98-4cbc-bc58-f0454043ee6b","Type":"ContainerStarted","Data":"646b616a91fcd2b4d449ee884035ee38293dba7461298bda29735389988614c1"} Nov 28 17:06:26 crc kubenswrapper[4884]: I1128 17:06:26.614016 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" event={"ID":"4d1cc7c0-4594-4443-80a8-237320e0138e","Type":"ContainerStarted","Data":"5f0ebdd57e60defd8f1b62d831ed84ee7fbc9456ea1f8a12fc9b242c8db79e50"} Nov 28 17:06:31 crc kubenswrapper[4884]: I1128 17:06:31.686127 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" event={"ID":"9f95eadf-8c98-4cbc-bc58-f0454043ee6b","Type":"ContainerStarted","Data":"54586cee5150ee76b32099640ce1fe99f5627949f42c15fad45190b1531aacaf"} Nov 28 17:06:31 crc kubenswrapper[4884]: I1128 17:06:31.918195 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-9tckd" podStartSLOduration=3.897814541 podStartE2EDuration="7.918152508s" podCreationTimestamp="2025-11-28 17:06:24 +0000 UTC" firstStartedPulling="2025-11-28 17:06:26.117001869 +0000 UTC m=+6425.679785680" lastFinishedPulling="2025-11-28 17:06:30.137339846 +0000 UTC 
m=+6429.700123647" observedRunningTime="2025-11-28 17:06:31.914622412 +0000 UTC m=+6431.477406223" watchObservedRunningTime="2025-11-28 17:06:31.918152508 +0000 UTC m=+6431.480936329" Nov 28 17:06:32 crc kubenswrapper[4884]: I1128 17:06:32.249601 4884 scope.go:117] "RemoveContainer" containerID="221ee9f0a947f73eaad0856d220a679b5b52e19cbfd3c2601c0e97f7f984c24b" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.138349 4884 scope.go:117] "RemoveContainer" containerID="eb9bf7b638943e5d4598e6b4d7c59fe1e44e624179d0964178190014d9c9889f" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.262586 4884 scope.go:117] "RemoveContainer" containerID="18ddab20017ff8adaf8fb3133e37bceae9a0b075618a6328ea97838c2e7dfae2" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.352458 4884 scope.go:117] "RemoveContainer" containerID="4a024e7059adf7516f6ced612dd32290706d012f1173169999cc33bc32753746" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.402925 4884 scope.go:117] "RemoveContainer" containerID="b8bf57a6c80c5839eb6d7ec044fe5d59725f8cafe2b62160c0e19a6bf12b6e89" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.464408 4884 scope.go:117] "RemoveContainer" containerID="7521e0680f6d1dcf83d7edeabcc49c102cdaa16f996225d4439c6708b94a820c" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.491546 4884 scope.go:117] "RemoveContainer" containerID="62a6840da3b8a531145a1c13670e60c24403673ce0a33b753f55ef189a8fd612" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.729891 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" event={"ID":"320957a8-3581-4e31-96e6-95f80a3cfcce","Type":"ContainerStarted","Data":"527dca21f977f4ae50f582c93c211008555b9ba359699d0cfe4723c53b39032c"} Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.730679 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.732603 4884 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-tw7tv container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.1.127:8081/healthz\": dial tcp 10.217.1.127:8081: connect: connection refused" start-of-body= Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.732669 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" podUID="320957a8-3581-4e31-96e6-95f80a3cfcce" containerName="operator" probeResult="failure" output="Get \"http://10.217.1.127:8081/healthz\": dial tcp 10.217.1.127:8081: connect: connection refused" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.740796 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" event={"ID":"4d1cc7c0-4594-4443-80a8-237320e0138e","Type":"ContainerStarted","Data":"5de6a7d2e053504fd92eb7c28ddf5ff5c7c4645369e49eb496cd41cc673bfdea"} Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.741993 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.751640 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" 
event={"ID":"6773cf61-55c4-4432-bc98-92b878f74b05","Type":"ContainerStarted","Data":"1df3f3c5554e5e1bb3ba9d32aad4be0e2c80acf0596fdbff324aa356ea02a8e1"} Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.754583 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" event={"ID":"527a6443-b807-4583-b7f5-6307ba1cade7","Type":"ContainerStarted","Data":"fa4da43d684cd1f2eb2455ec358e9ade60e335948ed19cc9b8d2e9b62ede5890"} Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.765355 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" podStartSLOduration=2.882535289 podStartE2EDuration="10.76533558s" podCreationTimestamp="2025-11-28 17:06:24 +0000 UTC" firstStartedPulling="2025-11-28 17:06:26.294954366 +0000 UTC m=+6425.857738167" lastFinishedPulling="2025-11-28 17:06:34.177754657 +0000 UTC m=+6433.740538458" observedRunningTime="2025-11-28 17:06:34.760409848 +0000 UTC m=+6434.323193649" watchObservedRunningTime="2025-11-28 17:06:34.76533558 +0000 UTC m=+6434.328119371" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.803402 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" podStartSLOduration=6.823989265 podStartE2EDuration="10.803382425s" podCreationTimestamp="2025-11-28 17:06:24 +0000 UTC" firstStartedPulling="2025-11-28 17:06:26.49471651 +0000 UTC m=+6426.057500311" lastFinishedPulling="2025-11-28 17:06:30.47410967 +0000 UTC m=+6430.036893471" observedRunningTime="2025-11-28 17:06:34.793932543 +0000 UTC m=+6434.356716344" watchObservedRunningTime="2025-11-28 17:06:34.803382425 +0000 UTC m=+6434.366166226" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.825954 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k" podStartSLOduration=6.96765142 podStartE2EDuration="10.8259311s" podCreationTimestamp="2025-11-28 17:06:24 +0000 UTC" firstStartedPulling="2025-11-28 17:06:26.279519957 +0000 UTC m=+6425.842303758" lastFinishedPulling="2025-11-28 17:06:30.137799637 +0000 UTC m=+6429.700583438" observedRunningTime="2025-11-28 17:06:34.809882135 +0000 UTC m=+6434.372665946" watchObservedRunningTime="2025-11-28 17:06:34.8259311 +0000 UTC m=+6434.388714901" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.837625 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g" podStartSLOduration=6.82661808 podStartE2EDuration="10.837606017s" podCreationTimestamp="2025-11-28 17:06:24 +0000 UTC" firstStartedPulling="2025-11-28 17:06:26.131440414 +0000 UTC m=+6425.694224215" lastFinishedPulling="2025-11-28 17:06:30.142428351 +0000 UTC m=+6429.705212152" observedRunningTime="2025-11-28 17:06:34.8373193 +0000 UTC m=+6434.400103111" watchObservedRunningTime="2025-11-28 17:06:34.837606017 +0000 UTC m=+6434.400389828" Nov 28 17:06:34 crc kubenswrapper[4884]: I1128 17:06:34.855501 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-tw7tv" Nov 28 17:06:36 crc kubenswrapper[4884]: I1128 17:06:36.030955 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-kqdzv"] Nov 28 17:06:36 crc kubenswrapper[4884]: I1128 17:06:36.041124 4884 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6dsxb"] Nov 28 17:06:36 crc kubenswrapper[4884]: I1128 17:06:36.049249 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-kqdzv"] Nov 28 17:06:36 crc kubenswrapper[4884]: I1128 17:06:36.064385 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-6dsxb"] Nov 28 17:06:36 crc kubenswrapper[4884]: I1128 17:06:36.700660 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06cbddb0-9fd4-44dc-b931-ed32606c010d" path="/var/lib/kubelet/pods/06cbddb0-9fd4-44dc-b931-ed32606c010d/volumes" Nov 28 17:06:36 crc kubenswrapper[4884]: I1128 17:06:36.701777 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46544cf8-e2f2-4788-9d06-27a91f38e9de" path="/var/lib/kubelet/pods/46544cf8-e2f2-4788-9d06-27a91f38e9de/volumes" Nov 28 17:06:45 crc kubenswrapper[4884]: I1128 17:06:45.452388 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-vqxbd" Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.620928 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.621718 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" containerName="openstackclient" containerID="cri-o://08a07c4deb7f3e926d336ec4afa1ce7ca389b2feafe1d993207d59d1de597054" gracePeriod=2 Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.631770 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.696152 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 17:06:47 crc kubenswrapper[4884]: E1128 17:06:47.696691 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" containerName="openstackclient" Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.696717 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" containerName="openstackclient" Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.697008 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" containerName="openstackclient" Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.697968 4884 util.go:30] "No sandbox for pod can be found. 
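[editor's note] The pod_startup_latency_tracker entries above encode a simple relationship: podStartSLOduration is the end-to-end startup time minus the image-pull window, i.e. SLO = E2E - (lastFinishedPulling - firstStartedPulling). A quick cross-check of that arithmetic using the perses-operator entry's numbers (illustrative only, not kubelet source; the kubelet itself uses the monotonic m=+... offsets, so the last digits of wall-clock subtraction can differ by nanoseconds):

```go
package main

import (
	"fmt"
	"time"
)

// layout matches Go's default time.Time formatting used in these log fields.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-28 17:06:24 +0000 UTC")
	firstPull := mustParse("2025-11-28 17:06:26.49471651 +0000 UTC")
	lastPull := mustParse("2025-11-28 17:06:30.47410967 +0000 UTC")
	running := mustParse("2025-11-28 17:06:34.803382425 +0000 UTC") // watchObservedRunningTime

	e2e := running.Sub(created)      // 10.803382425s == podStartE2EDuration
	pull := lastPull.Sub(firstPull)  // 3.97939316s spent pulling images
	fmt.Println(e2e, pull, e2e-pull) // e2e-pull == podStartSLOduration 6.823989265
}
```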
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.730483 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.732509 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" podUID="d87676c1-cb88-4f8a-8151-421c0ef330fe"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.857866 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9h2j\" (UniqueName: \"kubernetes.io/projected/d87676c1-cb88-4f8a-8151-421c0ef330fe-kube-api-access-c9h2j\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.857989 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d87676c1-cb88-4f8a-8151-421c0ef330fe-openstack-config\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.858211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d87676c1-cb88-4f8a-8151-421c0ef330fe-openstack-config-secret\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.959615 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d87676c1-cb88-4f8a-8151-421c0ef330fe-openstack-config\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.959797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d87676c1-cb88-4f8a-8151-421c0ef330fe-openstack-config-secret\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.959844 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9h2j\" (UniqueName: \"kubernetes.io/projected/d87676c1-cb88-4f8a-8151-421c0ef330fe-kube-api-access-c9h2j\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.960950 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d87676c1-cb88-4f8a-8151-421c0ef330fe-openstack-config\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:47 crc kubenswrapper[4884]: I1128 17:06:47.971230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d87676c1-cb88-4f8a-8151-421c0ef330fe-openstack-config-secret\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.000783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9h2j\" (UniqueName: \"kubernetes.io/projected/d87676c1-cb88-4f8a-8151-421c0ef330fe-kube-api-access-c9h2j\") pod \"openstackclient\" (UID: \"d87676c1-cb88-4f8a-8151-421c0ef330fe\") " pod="openstack/openstackclient"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.019582 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.021626 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.031442 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-wx2fm"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.036684 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.103907 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.171544 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vszjh\" (UniqueName: \"kubernetes.io/projected/0525b455-8b24-46be-af20-2a91f79b2eae-kube-api-access-vszjh\") pod \"kube-state-metrics-0\" (UID: \"0525b455-8b24-46be-af20-2a91f79b2eae\") " pod="openstack/kube-state-metrics-0"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.275377 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vszjh\" (UniqueName: \"kubernetes.io/projected/0525b455-8b24-46be-af20-2a91f79b2eae-kube-api-access-vszjh\") pod \"kube-state-metrics-0\" (UID: \"0525b455-8b24-46be-af20-2a91f79b2eae\") " pod="openstack/kube-state-metrics-0"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.352248 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vszjh\" (UniqueName: \"kubernetes.io/projected/0525b455-8b24-46be-af20-2a91f79b2eae-kube-api-access-vszjh\") pod \"kube-state-metrics-0\" (UID: \"0525b455-8b24-46be-af20-2a91f79b2eae\") " pod="openstack/kube-state-metrics-0"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.540579 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.657735 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ft7sd"]
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.659895 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft7sd"
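[editor's note] The reflector.go "Caches populated for *v1.Secret from object-..." entries record the kubelet warming a local watch cache for each secret a new pod references before mounting its volumes. A hedged client-go sketch of the same reflector/informer pattern (not kubelet source; clientset construction and the "openstack" namespace are assumptions for illustration):

```go
package logexamples

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchSecrets blocks until the Secret cache for one namespace is synced,
// the moment the log above reports as "Caches populated".
func watchSecrets(clientset kubernetes.Interface, stop <-chan struct{}) cache.SharedIndexInformer {
	factory := informers.NewSharedInformerFactoryWithOptions(
		clientset, 10*time.Minute, informers.WithNamespace("openstack"))
	inf := factory.Core().V1().Secrets().Informer()
	factory.Start(stop)
	cache.WaitForCacheSync(stop, inf.HasSynced) // cache is now populated
	return inf
}
```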
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.714501 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-utilities\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.714928 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-catalog-content\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.715113 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx27n\" (UniqueName: \"kubernetes.io/projected/9c80924e-6a7d-4a22-987d-bd82d39d2283-kube-api-access-wx27n\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.744379 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ft7sd"]
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.816868 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx27n\" (UniqueName: \"kubernetes.io/projected/9c80924e-6a7d-4a22-987d-bd82d39d2283-kube-api-access-wx27n\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.816982 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-utilities\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.817024 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-catalog-content\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.817653 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-catalog-content\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.818109 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-utilities\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:48 crc kubenswrapper[4884]: I1128 17:06:48.889861 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx27n\" (UniqueName: \"kubernetes.io/projected/9c80924e-6a7d-4a22-987d-bd82d39d2283-kube-api-access-wx27n\") pod \"certified-operators-ft7sd\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.043653 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.058275 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.060558 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.065139 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.065197 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.065357 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.065497 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.065886 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-c5bd5"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.115141 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.124981 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7f935e19-a4e0-4ac8-8706-341c2a6495a0-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.125355 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.125378 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/7f935e19-a4e0-4ac8-8706-341c2a6495a0-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.125422 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7f935e19-a4e0-4ac8-8706-341c2a6495a0-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0"
pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.131210 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.131344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.131363 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4rbt\" (UniqueName: \"kubernetes.io/projected/7f935e19-a4e0-4ac8-8706-341c2a6495a0-kube-api-access-r4rbt\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.234374 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.235805 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/7f935e19-a4e0-4ac8-8706-341c2a6495a0-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.236015 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/7f935e19-a4e0-4ac8-8706-341c2a6495a0-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.236308 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7f935e19-a4e0-4ac8-8706-341c2a6495a0-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.236560 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.236758 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-web-config\") pod 
\"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.236876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4rbt\" (UniqueName: \"kubernetes.io/projected/7f935e19-a4e0-4ac8-8706-341c2a6495a0-kube-api-access-r4rbt\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.237126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7f935e19-a4e0-4ac8-8706-341c2a6495a0-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.243870 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.265784 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.266334 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7f935e19-a4e0-4ac8-8706-341c2a6495a0-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.270918 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.278332 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7f935e19-a4e0-4ac8-8706-341c2a6495a0-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.278627 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7f935e19-a4e0-4ac8-8706-341c2a6495a0-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.304949 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4rbt\" (UniqueName: \"kubernetes.io/projected/7f935e19-a4e0-4ac8-8706-341c2a6495a0-kube-api-access-r4rbt\") pod \"alertmanager-metric-storage-0\" (UID: \"7f935e19-a4e0-4ac8-8706-341c2a6495a0\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.442895 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 17:06:49 crc kubenswrapper[4884]: 
I1128 17:06:49.456873 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.461923 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.462342 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.462498 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.462700 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-88229" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.462844 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.462990 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.511698 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.512415 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.545292 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.609568 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4c97248e-a5bc-4d82-a535-f4701b40dbf0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.609665 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4c97248e-a5bc-4d82-a535-f4701b40dbf0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.609738 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.609835 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj9dl\" (UniqueName: \"kubernetes.io/projected/4c97248e-a5bc-4d82-a535-f4701b40dbf0-kube-api-access-pj9dl\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.609952 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.610237 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.610313 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4c97248e-a5bc-4d82-a535-f4701b40dbf0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.610450 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-config\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.721684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.722876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.722984 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4c97248e-a5bc-4d82-a535-f4701b40dbf0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.723181 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-config\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.723364 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4c97248e-a5bc-4d82-a535-f4701b40dbf0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.723477 4884 
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.723477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4c97248e-a5bc-4d82-a535-f4701b40dbf0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.723590 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.723720 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj9dl\" (UniqueName: \"kubernetes.io/projected/4c97248e-a5bc-4d82-a535-f4701b40dbf0-kube-api-access-pj9dl\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.725152 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/4c97248e-a5bc-4d82-a535-f4701b40dbf0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.734912 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.734977 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.735016 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9058cf89d6f2986b86a076e99ff94b15479cbd3e9c0d95d4e388dfd5089b6331/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.735248 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4c97248e-a5bc-4d82-a535-f4701b40dbf0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.736257 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4c97248e-a5bc-4d82-a535-f4701b40dbf0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.736894 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.737484 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c97248e-a5bc-4d82-a535-f4701b40dbf0-config\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.764789 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj9dl\" (UniqueName: \"kubernetes.io/projected/4c97248e-a5bc-4d82-a535-f4701b40dbf0-kube-api-access-pj9dl\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.862272 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc2d71ad-b697-46ea-b166-2d5b2ada1a64\") pod \"prometheus-metric-storage-0\" (UID: \"4c97248e-a5bc-4d82-a535-f4701b40dbf0\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.883843 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ft7sd"]
Nov 28 17:06:49 crc kubenswrapper[4884]: I1128 17:06:49.991749 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d87676c1-cb88-4f8a-8151-421c0ef330fe","Type":"ContainerStarted","Data":"93af5e64e2487a199aa8e328b676b4fabcbd73de0714f833b80a74d4f8a19c66"}
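[editor's note] The csi_attacher.go line above explains itself once the CSI staging handshake is known: before staging a volume, the in-tree CSI code asks the node plugin for its capabilities and only calls NodeStageVolume (the "MountDevice" step) when the plugin advertises STAGE_UNSTAGE_VOLUME; the hostpath provisioner here does not, so MountDevice is skipped and the operation is reported as trivially succeeded. A hedged sketch of that capability check using the CSI spec's Go types (illustrative, not the kubelet's csi_attacher implementation):

```go
package logexamples

import (
	"github.com/container-storage-interface/spec/lib/go/csi"
)

// needsStage reports whether NodeStageVolume must be called before
// NodePublishVolume, based on the plugin's advertised node capabilities.
func needsStage(caps []*csi.NodeServiceCapability) bool {
	for _, c := range caps {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true
		}
	}
	// No staging support: this is the "STAGE_UNSTAGE_VOLUME capability not
	// set. Skipping MountDevice..." branch seen in the log.
	return false
}
```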
pod="openstack/openstackclient" event={"ID":"d87676c1-cb88-4f8a-8151-421c0ef330fe","Type":"ContainerStarted","Data":"6eee5277723fe511ca384dc1065a426fe533e3babaace756fb8b094ec17f8331"} Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.033451 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerStarted","Data":"57ad14f1012bde79ef8f0b86b1c249212a2d4efe0ea7e2e27e7d0ab08de325cb"} Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.055361 4884 generic.go:334] "Generic (PLEG): container finished" podID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" containerID="08a07c4deb7f3e926d336ec4afa1ce7ca389b2feafe1d993207d59d1de597054" exitCode=137 Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.071751 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.071729166 podStartE2EDuration="3.071729166s" podCreationTimestamp="2025-11-28 17:06:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:06:50.028582075 +0000 UTC m=+6449.591365886" watchObservedRunningTime="2025-11-28 17:06:50.071729166 +0000 UTC m=+6449.634512967" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.088468 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0525b455-8b24-46be-af20-2a91f79b2eae","Type":"ContainerStarted","Data":"b1829f65ec2186e89712a3924e136e5c30cacbcfa3f5c7870c5977dc87dc2568"} Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.118787 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.466941 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.751991 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.780787 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config-secret\") pod \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.781282 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5lqz\" (UniqueName: \"kubernetes.io/projected/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-kube-api-access-n5lqz\") pod \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.781387 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config\") pod \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\" (UID: \"fdccee80-ab8b-44a4-999d-e6cc64d2cc98\") " Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.798113 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" podUID="d87676c1-cb88-4f8a-8151-421c0ef330fe" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.807726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-kube-api-access-n5lqz" (OuterVolumeSpecName: "kube-api-access-n5lqz") pod "fdccee80-ab8b-44a4-999d-e6cc64d2cc98" (UID: "fdccee80-ab8b-44a4-999d-e6cc64d2cc98"). InnerVolumeSpecName "kube-api-access-n5lqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.831230 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "fdccee80-ab8b-44a4-999d-e6cc64d2cc98" (UID: "fdccee80-ab8b-44a4-999d-e6cc64d2cc98"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.850012 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.893823 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5lqz\" (UniqueName: \"kubernetes.io/projected/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-kube-api-access-n5lqz\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.893851 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.900722 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "fdccee80-ab8b-44a4-999d-e6cc64d2cc98" (UID: "fdccee80-ab8b-44a4-999d-e6cc64d2cc98"). InnerVolumeSpecName "openstack-config-secret". 
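[editor's note] The exitCode=137 reported above for the old openstackclient container is the shell convention for death by signal: 128 + 9, i.e. SIGKILL, which is what the runtime sends once the 2-second grace period from the earlier "Killing container with a grace period ... gracePeriod=2" entry expires. A tiny decoder for such exit codes (illustrative; the 128+signal convention is standard on Linux):

```go
package main

import (
	"fmt"
	"syscall"
)

// describeExit translates a container exit code into plain language.
func describeExit(code int) string {
	if code > 128 {
		sig := syscall.Signal(code - 128)
		return fmt.Sprintf("killed by signal %d (%s)", int(sig), sig)
	}
	return fmt.Sprintf("exited normally with status %d", code)
}

func main() {
	fmt.Println(describeExit(137)) // killed by signal 9 (killed), as in the log
	fmt.Println(describeExit(0))   // exited normally with status 0
}
```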
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:06:50 crc kubenswrapper[4884]: I1128 17:06:50.996226 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fdccee80-ab8b-44a4-999d-e6cc64d2cc98-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.100542 4884 generic.go:334] "Generic (PLEG): container finished" podID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerID="be869b9b4d432314a64c65f076a7e42ff5a0dfa145d307022d3673a450ce8f9b" exitCode=0 Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.100624 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerDied","Data":"be869b9b4d432314a64c65f076a7e42ff5a0dfa145d307022d3673a450ce8f9b"} Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.102998 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4c97248e-a5bc-4d82-a535-f4701b40dbf0","Type":"ContainerStarted","Data":"21bd22ffcce21b4582506155ea9967787a56ed949e9d17301f424a3983611ed5"} Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.106429 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.106435 4884 scope.go:117] "RemoveContainer" containerID="08a07c4deb7f3e926d336ec4afa1ce7ca389b2feafe1d993207d59d1de597054" Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.110520 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7f935e19-a4e0-4ac8-8706-341c2a6495a0","Type":"ContainerStarted","Data":"a9b4502ea45e2cd7ecbabf40b6c888984362811a2d3073f7b0bb2478ec140f3b"} Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.113099 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0525b455-8b24-46be-af20-2a91f79b2eae","Type":"ContainerStarted","Data":"99da5415693d550e69f3027c3b09fd231b770f86b2aec85de4442c2c694dee51"} Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.126072 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" podUID="d87676c1-cb88-4f8a-8151-421c0ef330fe" Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.150679 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" podUID="d87676c1-cb88-4f8a-8151-421c0ef330fe" Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.153545 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.619347486 podStartE2EDuration="4.153531604s" podCreationTimestamp="2025-11-28 17:06:47 +0000 UTC" firstStartedPulling="2025-11-28 17:06:49.499357998 +0000 UTC m=+6449.062141799" lastFinishedPulling="2025-11-28 17:06:50.033542116 +0000 UTC m=+6449.596325917" observedRunningTime="2025-11-28 17:06:51.144640696 +0000 UTC m=+6450.707424497" watchObservedRunningTime="2025-11-28 17:06:51.153531604 +0000 UTC m=+6450.716315405" Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.243828 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:06:51 crc kubenswrapper[4884]: I1128 17:06:51.243884 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:06:52 crc kubenswrapper[4884]: I1128 17:06:52.129563 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 17:06:52 crc kubenswrapper[4884]: I1128 17:06:52.710366 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdccee80-ab8b-44a4-999d-e6cc64d2cc98" path="/var/lib/kubelet/pods/fdccee80-ab8b-44a4-999d-e6cc64d2cc98/volumes" Nov 28 17:06:53 crc kubenswrapper[4884]: I1128 17:06:53.142318 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerStarted","Data":"f80c3225c31a55420dc1542afa2031faa2b0f4584f749569d450ec9f58ebab44"} Nov 28 17:06:53 crc kubenswrapper[4884]: E1128 17:06:53.943306 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c80924e_6a7d_4a22_987d_bd82d39d2283.slice/crio-f80c3225c31a55420dc1542afa2031faa2b0f4584f749569d450ec9f58ebab44.scope\": RecentStats: unable to find data in memory cache]" Nov 28 17:06:55 crc kubenswrapper[4884]: I1128 17:06:55.165307 4884 generic.go:334] "Generic (PLEG): container finished" podID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerID="f80c3225c31a55420dc1542afa2031faa2b0f4584f749569d450ec9f58ebab44" exitCode=0 Nov 28 17:06:55 crc kubenswrapper[4884]: I1128 17:06:55.165667 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerDied","Data":"f80c3225c31a55420dc1542afa2031faa2b0f4584f749569d450ec9f58ebab44"} Nov 28 17:06:56 crc kubenswrapper[4884]: I1128 17:06:56.030197 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-r9nwp"] Nov 28 17:06:56 crc kubenswrapper[4884]: I1128 17:06:56.048028 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-r9nwp"] Nov 28 17:06:56 crc kubenswrapper[4884]: I1128 17:06:56.193619 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerStarted","Data":"43bd7406fc09daab6452bf40674531927f4c2cf22200bd4afd2d2447c0ab9056"} Nov 28 17:06:56 crc kubenswrapper[4884]: I1128 17:06:56.275564 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ft7sd" podStartSLOduration=3.562568555 podStartE2EDuration="8.27554494s" podCreationTimestamp="2025-11-28 17:06:48 +0000 UTC" firstStartedPulling="2025-11-28 17:06:51.102901749 +0000 UTC m=+6450.665685550" lastFinishedPulling="2025-11-28 17:06:55.815878134 +0000 UTC m=+6455.378661935" observedRunningTime="2025-11-28 17:06:56.266624051 +0000 UTC m=+6455.829407862" 
Nov 28 17:06:56 crc kubenswrapper[4884]: I1128 17:06:56.704123 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="283b5474-9682-4dbb-a66e-0c6b39b23398" path="/var/lib/kubelet/pods/283b5474-9682-4dbb-a66e-0c6b39b23398/volumes"
Nov 28 17:06:58 crc kubenswrapper[4884]: I1128 17:06:58.212081 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4c97248e-a5bc-4d82-a535-f4701b40dbf0","Type":"ContainerStarted","Data":"60640be9e26f27259aae78d4ef2162beb814c5eb7c82f7ccccad6c952b3213f0"}
Nov 28 17:06:58 crc kubenswrapper[4884]: I1128 17:06:58.587352 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 17:06:59 crc kubenswrapper[4884]: I1128 17:06:59.044973 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:59 crc kubenswrapper[4884]: I1128 17:06:59.045036 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:59 crc kubenswrapper[4884]: I1128 17:06:59.097117 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:06:59 crc kubenswrapper[4884]: I1128 17:06:59.225706 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7f935e19-a4e0-4ac8-8706-341c2a6495a0","Type":"ContainerStarted","Data":"8b0c49061f3d49bb33da54723575f064c455561b08761779c98a1b31f589da2c"}
Nov 28 17:07:05 crc kubenswrapper[4884]: I1128 17:07:05.281388 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c97248e-a5bc-4d82-a535-f4701b40dbf0" containerID="60640be9e26f27259aae78d4ef2162beb814c5eb7c82f7ccccad6c952b3213f0" exitCode=0
Nov 28 17:07:05 crc kubenswrapper[4884]: I1128 17:07:05.281578 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4c97248e-a5bc-4d82-a535-f4701b40dbf0","Type":"ContainerDied","Data":"60640be9e26f27259aae78d4ef2162beb814c5eb7c82f7ccccad6c952b3213f0"}
Nov 28 17:07:05 crc kubenswrapper[4884]: I1128 17:07:05.292056 4884 generic.go:334] "Generic (PLEG): container finished" podID="7f935e19-a4e0-4ac8-8706-341c2a6495a0" containerID="8b0c49061f3d49bb33da54723575f064c455561b08761779c98a1b31f589da2c" exitCode=0
Nov 28 17:07:05 crc kubenswrapper[4884]: I1128 17:07:05.292116 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7f935e19-a4e0-4ac8-8706-341c2a6495a0","Type":"ContainerDied","Data":"8b0c49061f3d49bb33da54723575f064c455561b08761779c98a1b31f589da2c"}
Nov 28 17:07:09 crc kubenswrapper[4884]: I1128 17:07:09.092691 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ft7sd"
Nov 28 17:07:09 crc kubenswrapper[4884]: I1128 17:07:09.337362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7f935e19-a4e0-4ac8-8706-341c2a6495a0","Type":"ContainerStarted","Data":"768cc6ed3ead39d1ee7297dc41e2a1a9cd4197969549f09ca954ceef82a0f512"}
Nov 28 17:07:11 crc kubenswrapper[4884]: I1128 17:07:11.498036 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ft7sd"]
Nov 28 17:07:11 crc kubenswrapper[4884]: I1128 17:07:11.498710 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ft7sd" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="registry-server" containerID="cri-o://43bd7406fc09daab6452bf40674531927f4c2cf22200bd4afd2d2447c0ab9056" gracePeriod=2
Nov 28 17:07:12 crc kubenswrapper[4884]: I1128 17:07:12.368651 4884 generic.go:334] "Generic (PLEG): container finished" podID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerID="43bd7406fc09daab6452bf40674531927f4c2cf22200bd4afd2d2447c0ab9056" exitCode=0
Nov 28 17:07:12 crc kubenswrapper[4884]: I1128 17:07:12.368793 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerDied","Data":"43bd7406fc09daab6452bf40674531927f4c2cf22200bd4afd2d2447c0ab9056"}
Nov 28 17:07:14 crc kubenswrapper[4884]: I1128 17:07:14.386821 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"7f935e19-a4e0-4ac8-8706-341c2a6495a0","Type":"ContainerStarted","Data":"b78aa09ba66fa348424f93f7063b57d48111b6a7c21ee2e9b6f2a3d2e37e9fa3"}
Nov 28 17:07:14 crc kubenswrapper[4884]: I1128 17:07:14.387460 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0"
Nov 28 17:07:14 crc kubenswrapper[4884]: I1128 17:07:14.390173 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0"
Nov 28 17:07:14 crc kubenswrapper[4884]: I1128 17:07:14.422883 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=8.462721814 podStartE2EDuration="26.422858934s" podCreationTimestamp="2025-11-28 17:06:48 +0000 UTC" firstStartedPulling="2025-11-28 17:06:50.514374604 +0000 UTC m=+6450.077158405" lastFinishedPulling="2025-11-28 17:07:08.474511724 +0000 UTC m=+6468.037295525" observedRunningTime="2025-11-28 17:07:14.41373675 +0000 UTC m=+6473.976520601" watchObservedRunningTime="2025-11-28 17:07:14.422858934 +0000 UTC m=+6473.985642785"
Nov 28 17:07:14 crc kubenswrapper[4884]: I1128 17:07:14.894111 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft7sd"
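[editor's note] The "Killing container with a grace period ... gracePeriod=2" entry above is the standard TERM-then-KILL sequence: the runtime sends SIGTERM, waits up to the grace period, then escalates to SIGKILL. Here registry-server shut down in time (exitCode=0 follows), unlike the earlier openstackclient kill that ended in exitCode=137. A minimal stand-alone sketch of the pattern (illustrative only, not CRI-O or kubelet code; os.Process.Wait as used here only works for child processes):

```go
package logexamples

import (
	"os"
	"syscall"
	"time"
)

// killWithGrace asks a process to exit, then forces it after the grace period.
func killWithGrace(pid int, grace time.Duration) error {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return err
	}
	_ = proc.Signal(syscall.SIGTERM) // the polite request, like gracePeriod=2
	done := make(chan struct{})
	go func() {
		_, _ = proc.Wait() // child processes only; illustrative
		close(done)
	}()
	select {
	case <-done:
		return nil // clean shutdown, like registry-server's exitCode=0
	case <-time.After(grace):
		return proc.Kill() // SIGKILL, producing the 128+9=137 exit codes
	}
}
```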
Need to start a new one" pod="openshift-marketplace/certified-operators-ft7sd" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.006630 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-utilities\") pod \"9c80924e-6a7d-4a22-987d-bd82d39d2283\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.006713 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wx27n\" (UniqueName: \"kubernetes.io/projected/9c80924e-6a7d-4a22-987d-bd82d39d2283-kube-api-access-wx27n\") pod \"9c80924e-6a7d-4a22-987d-bd82d39d2283\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.006822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-catalog-content\") pod \"9c80924e-6a7d-4a22-987d-bd82d39d2283\" (UID: \"9c80924e-6a7d-4a22-987d-bd82d39d2283\") " Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.007756 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-utilities" (OuterVolumeSpecName: "utilities") pod "9c80924e-6a7d-4a22-987d-bd82d39d2283" (UID: "9c80924e-6a7d-4a22-987d-bd82d39d2283"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.016600 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c80924e-6a7d-4a22-987d-bd82d39d2283-kube-api-access-wx27n" (OuterVolumeSpecName: "kube-api-access-wx27n") pod "9c80924e-6a7d-4a22-987d-bd82d39d2283" (UID: "9c80924e-6a7d-4a22-987d-bd82d39d2283"). InnerVolumeSpecName "kube-api-access-wx27n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.063178 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c80924e-6a7d-4a22-987d-bd82d39d2283" (UID: "9c80924e-6a7d-4a22-987d-bd82d39d2283"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.109592 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.109629 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wx27n\" (UniqueName: \"kubernetes.io/projected/9c80924e-6a7d-4a22-987d-bd82d39d2283-kube-api-access-wx27n\") on node \"crc\" DevicePath \"\"" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.109644 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c80924e-6a7d-4a22-987d-bd82d39d2283-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.398168 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft7sd" event={"ID":"9c80924e-6a7d-4a22-987d-bd82d39d2283","Type":"ContainerDied","Data":"57ad14f1012bde79ef8f0b86b1c249212a2d4efe0ea7e2e27e7d0ab08de325cb"} Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.398193 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft7sd" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.398232 4884 scope.go:117] "RemoveContainer" containerID="43bd7406fc09daab6452bf40674531927f4c2cf22200bd4afd2d2447c0ab9056" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.447019 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ft7sd"] Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.455948 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ft7sd"] Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.665350 4884 scope.go:117] "RemoveContainer" containerID="f80c3225c31a55420dc1542afa2031faa2b0f4584f749569d450ec9f58ebab44" Nov 28 17:07:15 crc kubenswrapper[4884]: I1128 17:07:15.690624 4884 scope.go:117] "RemoveContainer" containerID="be869b9b4d432314a64c65f076a7e42ff5a0dfa145d307022d3673a450ce8f9b" Nov 28 17:07:16 crc kubenswrapper[4884]: I1128 17:07:16.418251 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4c97248e-a5bc-4d82-a535-f4701b40dbf0","Type":"ContainerStarted","Data":"21be70bfdb293e91e5ce4f0c73e213e399bc228fe570b55528e19668748a8cec"} Nov 28 17:07:16 crc kubenswrapper[4884]: I1128 17:07:16.699195 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" path="/var/lib/kubelet/pods/9c80924e-6a7d-4a22-987d-bd82d39d2283/volumes" Nov 28 17:07:21 crc kubenswrapper[4884]: I1128 17:07:21.160718 4884 patch_prober.go:28] interesting pod/oauth-openshift-7f687b986-777hq container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 17:07:21 crc kubenswrapper[4884]: I1128 17:07:21.161081 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-7f687b986-777hq" podUID="6ba420f4-673f-44b1-9ad4-526b4497307f" containerName="oauth-openshift" probeResult="failure" output="Get 
\"https://10.217.0.56:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 17:07:21 crc kubenswrapper[4884]: I1128 17:07:21.243683 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:07:21 crc kubenswrapper[4884]: I1128 17:07:21.243766 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:07:22 crc kubenswrapper[4884]: I1128 17:07:22.493550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4c97248e-a5bc-4d82-a535-f4701b40dbf0","Type":"ContainerStarted","Data":"248a7c835309c40686ff1800129d2e9f8561aaa31eb1d778d3ce37c4a940f4ca"} Nov 28 17:07:28 crc kubenswrapper[4884]: I1128 17:07:28.752937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"4c97248e-a5bc-4d82-a535-f4701b40dbf0","Type":"ContainerStarted","Data":"4bb8de0ba9c80f6143822530aec41e44a4fd65339810fc8467a9f7ec66af65b1"} Nov 28 17:07:28 crc kubenswrapper[4884]: I1128 17:07:28.784308 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.188584134 podStartE2EDuration="40.784290638s" podCreationTimestamp="2025-11-28 17:06:48 +0000 UTC" firstStartedPulling="2025-11-28 17:06:50.873990999 +0000 UTC m=+6450.436774800" lastFinishedPulling="2025-11-28 17:07:27.469697513 +0000 UTC m=+6487.032481304" observedRunningTime="2025-11-28 17:07:28.781887369 +0000 UTC m=+6488.344671170" watchObservedRunningTime="2025-11-28 17:07:28.784290638 +0000 UTC m=+6488.347074439" Nov 28 17:07:30 crc kubenswrapper[4884]: I1128 17:07:30.119544 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.171924 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:07:33 crc kubenswrapper[4884]: E1128 17:07:33.172744 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="extract-utilities" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.172759 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="extract-utilities" Nov 28 17:07:33 crc kubenswrapper[4884]: E1128 17:07:33.172772 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="registry-server" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.172778 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="registry-server" Nov 28 17:07:33 crc kubenswrapper[4884]: E1128 17:07:33.172792 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="extract-content" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.172799 4884 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="extract-content" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.172989 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c80924e-6a7d-4a22-987d-bd82d39d2283" containerName="registry-server" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.176739 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.178793 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.178850 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.191791 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339727 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-config-data\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339788 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-log-httpd\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339819 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-run-httpd\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339869 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339899 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sldrx\" (UniqueName: \"kubernetes.io/projected/15494225-080f-42ad-a8a5-aa0bfc2a52a6-kube-api-access-sldrx\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-scripts\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.339941 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " 
pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441426 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-config-data\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-log-httpd\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441509 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-run-httpd\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441565 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sldrx\" (UniqueName: \"kubernetes.io/projected/15494225-080f-42ad-a8a5-aa0bfc2a52a6-kube-api-access-sldrx\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441610 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-scripts\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.441635 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.443603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-log-httpd\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.443793 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-run-httpd\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.447128 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc 
kubenswrapper[4884]: I1128 17:07:33.447395 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-config-data\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.448185 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-scripts\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.459322 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.473811 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sldrx\" (UniqueName: \"kubernetes.io/projected/15494225-080f-42ad-a8a5-aa0bfc2a52a6-kube-api-access-sldrx\") pod \"ceilometer-0\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") " pod="openstack/ceilometer-0" Nov 28 17:07:33 crc kubenswrapper[4884]: I1128 17:07:33.503358 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:07:34 crc kubenswrapper[4884]: I1128 17:07:34.177371 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:07:34 crc kubenswrapper[4884]: I1128 17:07:34.657518 4884 scope.go:117] "RemoveContainer" containerID="9f5ec90f5f859ec359b101e0c818481f80cc6a01695cb78f6fc5e511e74d3669" Nov 28 17:07:34 crc kubenswrapper[4884]: I1128 17:07:34.702668 4884 scope.go:117] "RemoveContainer" containerID="850d601e9633378d40912316fea9ac54177f93a36774f682baae8ff8d1d184ab" Nov 28 17:07:34 crc kubenswrapper[4884]: I1128 17:07:34.767199 4884 scope.go:117] "RemoveContainer" containerID="ab2bfdb91e56b5a4239bed2b448579206e305374756eee2dd089ba730a499b05" Nov 28 17:07:34 crc kubenswrapper[4884]: I1128 17:07:34.822164 4884 scope.go:117] "RemoveContainer" containerID="109397a594cf0a69b6dd9009cb79b412b045ad2bfc86168000b49673ca7daa90" Nov 28 17:07:34 crc kubenswrapper[4884]: I1128 17:07:34.836992 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerStarted","Data":"7556895db1fbfe19e217085769c1bac77038ce70a783538d8e963cf4664afd6a"} Nov 28 17:07:35 crc kubenswrapper[4884]: I1128 17:07:35.119906 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 28 17:07:35 crc kubenswrapper[4884]: I1128 17:07:35.122314 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 28 17:07:35 crc kubenswrapper[4884]: I1128 17:07:35.859247 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerStarted","Data":"f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13"} Nov 28 17:07:35 crc kubenswrapper[4884]: I1128 17:07:35.861791 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 28 
17:07:37 crc kubenswrapper[4884]: I1128 17:07:37.881043 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerStarted","Data":"a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8"} Nov 28 17:07:38 crc kubenswrapper[4884]: I1128 17:07:38.039355 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-lw9qd"] Nov 28 17:07:38 crc kubenswrapper[4884]: I1128 17:07:38.057993 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-lw9qd"] Nov 28 17:07:38 crc kubenswrapper[4884]: I1128 17:07:38.700564 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f92a45a6-8ede-457a-bc9b-9364ed480d25" path="/var/lib/kubelet/pods/f92a45a6-8ede-457a-bc9b-9364ed480d25/volumes" Nov 28 17:07:38 crc kubenswrapper[4884]: I1128 17:07:38.890836 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerStarted","Data":"a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d"} Nov 28 17:07:40 crc kubenswrapper[4884]: I1128 17:07:40.913358 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerStarted","Data":"98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc"} Nov 28 17:07:40 crc kubenswrapper[4884]: I1128 17:07:40.913948 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 17:07:40 crc kubenswrapper[4884]: I1128 17:07:40.936929 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.121429663 podStartE2EDuration="7.936910985s" podCreationTimestamp="2025-11-28 17:07:33 +0000 UTC" firstStartedPulling="2025-11-28 17:07:34.169368205 +0000 UTC m=+6493.732152006" lastFinishedPulling="2025-11-28 17:07:39.984849527 +0000 UTC m=+6499.547633328" observedRunningTime="2025-11-28 17:07:40.930772994 +0000 UTC m=+6500.493556795" watchObservedRunningTime="2025-11-28 17:07:40.936910985 +0000 UTC m=+6500.499694786" Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.544441 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-wgzbn"] Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.547962 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.577700 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-wgzbn"] Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.663203 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slrpv\" (UniqueName: \"kubernetes.io/projected/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec-kube-api-access-slrpv\") pod \"aodh-db-create-wgzbn\" (UID: \"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec\") " pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.765439 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slrpv\" (UniqueName: \"kubernetes.io/projected/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec-kube-api-access-slrpv\") pod \"aodh-db-create-wgzbn\" (UID: \"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec\") " pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.798391 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slrpv\" (UniqueName: \"kubernetes.io/projected/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec-kube-api-access-slrpv\") pod \"aodh-db-create-wgzbn\" (UID: \"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec\") " pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:43 crc kubenswrapper[4884]: I1128 17:07:43.871778 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:44 crc kubenswrapper[4884]: I1128 17:07:44.482282 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-wgzbn"] Nov 28 17:07:44 crc kubenswrapper[4884]: W1128 17:07:44.484165 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabe3c9d7_5ae4_4cea_b6f0_739db22b8eec.slice/crio-92ed2e0c96c26bc176d889d15385f03f14fee811f0430dcc58879789a203b006 WatchSource:0}: Error finding container 92ed2e0c96c26bc176d889d15385f03f14fee811f0430dcc58879789a203b006: Status 404 returned error can't find the container with id 92ed2e0c96c26bc176d889d15385f03f14fee811f0430dcc58879789a203b006 Nov 28 17:07:44 crc kubenswrapper[4884]: I1128 17:07:44.954291 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-wgzbn" event={"ID":"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec","Type":"ContainerStarted","Data":"92ed2e0c96c26bc176d889d15385f03f14fee811f0430dcc58879789a203b006"} Nov 28 17:07:45 crc kubenswrapper[4884]: I1128 17:07:45.980855 4884 generic.go:334] "Generic (PLEG): container finished" podID="abe3c9d7-5ae4-4cea-b6f0-739db22b8eec" containerID="a03cfb62aa61e91ac0eef2d5295bf504a261012a75869df80510b0edb59db2c2" exitCode=0 Nov 28 17:07:45 crc kubenswrapper[4884]: I1128 17:07:45.980968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-wgzbn" event={"ID":"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec","Type":"ContainerDied","Data":"a03cfb62aa61e91ac0eef2d5295bf504a261012a75869df80510b0edb59db2c2"} Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.375364 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.554072 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slrpv\" (UniqueName: \"kubernetes.io/projected/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec-kube-api-access-slrpv\") pod \"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec\" (UID: \"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec\") " Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.559764 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec-kube-api-access-slrpv" (OuterVolumeSpecName: "kube-api-access-slrpv") pod "abe3c9d7-5ae4-4cea-b6f0-739db22b8eec" (UID: "abe3c9d7-5ae4-4cea-b6f0-739db22b8eec"). InnerVolumeSpecName "kube-api-access-slrpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.657953 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slrpv\" (UniqueName: \"kubernetes.io/projected/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec-kube-api-access-slrpv\") on node \"crc\" DevicePath \"\"" Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.998654 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-wgzbn" event={"ID":"abe3c9d7-5ae4-4cea-b6f0-739db22b8eec","Type":"ContainerDied","Data":"92ed2e0c96c26bc176d889d15385f03f14fee811f0430dcc58879789a203b006"} Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.998898 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92ed2e0c96c26bc176d889d15385f03f14fee811f0430dcc58879789a203b006" Nov 28 17:07:47 crc kubenswrapper[4884]: I1128 17:07:47.998726 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-wgzbn" Nov 28 17:07:48 crc kubenswrapper[4884]: I1128 17:07:48.030465 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5f1f-account-create-fgnw4"] Nov 28 17:07:48 crc kubenswrapper[4884]: I1128 17:07:48.042012 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5f1f-account-create-fgnw4"] Nov 28 17:07:48 crc kubenswrapper[4884]: I1128 17:07:48.704743 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e329d7c7-53b2-4a02-9119-ca75c0d423fd" path="/var/lib/kubelet/pods/e329d7c7-53b2-4a02-9119-ca75c0d423fd/volumes" Nov 28 17:07:51 crc kubenswrapper[4884]: I1128 17:07:51.242629 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:07:51 crc kubenswrapper[4884]: I1128 17:07:51.242996 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:07:51 crc kubenswrapper[4884]: I1128 17:07:51.243044 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:07:51 crc kubenswrapper[4884]: I1128 17:07:51.244120 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:07:51 crc kubenswrapper[4884]: I1128 17:07:51.244201 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" gracePeriod=600 Nov 28 17:07:51 crc kubenswrapper[4884]: E1128 17:07:51.374341 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:07:52 crc kubenswrapper[4884]: I1128 17:07:52.048687 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" exitCode=0 Nov 28 17:07:52 crc kubenswrapper[4884]: I1128 17:07:52.048735 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"} Nov 28 17:07:52 crc kubenswrapper[4884]: 
I1128 17:07:52.049062 4884 scope.go:117] "RemoveContainer" containerID="b85eb04dabfd5877cb92540e0cb996b7befe8c09494a73f2d3f896e81d9d08d7" Nov 28 17:07:52 crc kubenswrapper[4884]: I1128 17:07:52.049933 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:07:52 crc kubenswrapper[4884]: E1128 17:07:52.050497 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.554128 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-3e39-account-create-zzz2j"] Nov 28 17:07:53 crc kubenswrapper[4884]: E1128 17:07:53.554777 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abe3c9d7-5ae4-4cea-b6f0-739db22b8eec" containerName="mariadb-database-create" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.554789 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="abe3c9d7-5ae4-4cea-b6f0-739db22b8eec" containerName="mariadb-database-create" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.555030 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="abe3c9d7-5ae4-4cea-b6f0-739db22b8eec" containerName="mariadb-database-create" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.555773 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.567179 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.570159 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3e39-account-create-zzz2j"] Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.721036 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkfpk\" (UniqueName: \"kubernetes.io/projected/9d0f3da1-73b0-4f89-898a-4c00b2de52f5-kube-api-access-nkfpk\") pod \"aodh-3e39-account-create-zzz2j\" (UID: \"9d0f3da1-73b0-4f89-898a-4c00b2de52f5\") " pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.826462 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkfpk\" (UniqueName: \"kubernetes.io/projected/9d0f3da1-73b0-4f89-898a-4c00b2de52f5-kube-api-access-nkfpk\") pod \"aodh-3e39-account-create-zzz2j\" (UID: \"9d0f3da1-73b0-4f89-898a-4c00b2de52f5\") " pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.845627 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkfpk\" (UniqueName: \"kubernetes.io/projected/9d0f3da1-73b0-4f89-898a-4c00b2de52f5-kube-api-access-nkfpk\") pod \"aodh-3e39-account-create-zzz2j\" (UID: \"9d0f3da1-73b0-4f89-898a-4c00b2de52f5\") " pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:53 crc kubenswrapper[4884]: I1128 17:07:53.890489 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:54 crc kubenswrapper[4884]: I1128 17:07:54.364444 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3e39-account-create-zzz2j"] Nov 28 17:07:54 crc kubenswrapper[4884]: W1128 17:07:54.370954 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d0f3da1_73b0_4f89_898a_4c00b2de52f5.slice/crio-9e1b3f5f2b448c283aaffe349cbc041fa8b506a824cdb8b4ef2032b1ec5a5774 WatchSource:0}: Error finding container 9e1b3f5f2b448c283aaffe349cbc041fa8b506a824cdb8b4ef2032b1ec5a5774: Status 404 returned error can't find the container with id 9e1b3f5f2b448c283aaffe349cbc041fa8b506a824cdb8b4ef2032b1ec5a5774 Nov 28 17:07:55 crc kubenswrapper[4884]: I1128 17:07:55.036665 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-snckd"] Nov 28 17:07:55 crc kubenswrapper[4884]: I1128 17:07:55.047378 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-snckd"] Nov 28 17:07:55 crc kubenswrapper[4884]: I1128 17:07:55.080970 4884 generic.go:334] "Generic (PLEG): container finished" podID="9d0f3da1-73b0-4f89-898a-4c00b2de52f5" containerID="e76ca8e6ea27e28b60fffac18f57308a219c3d714d7108f74e12d28e7f04fa9d" exitCode=0 Nov 28 17:07:55 crc kubenswrapper[4884]: I1128 17:07:55.081011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e39-account-create-zzz2j" event={"ID":"9d0f3da1-73b0-4f89-898a-4c00b2de52f5","Type":"ContainerDied","Data":"e76ca8e6ea27e28b60fffac18f57308a219c3d714d7108f74e12d28e7f04fa9d"} Nov 28 17:07:55 crc kubenswrapper[4884]: I1128 17:07:55.081034 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e39-account-create-zzz2j" event={"ID":"9d0f3da1-73b0-4f89-898a-4c00b2de52f5","Type":"ContainerStarted","Data":"9e1b3f5f2b448c283aaffe349cbc041fa8b506a824cdb8b4ef2032b1ec5a5774"} Nov 28 17:07:56 crc kubenswrapper[4884]: I1128 17:07:56.482955 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:56 crc kubenswrapper[4884]: I1128 17:07:56.584402 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkfpk\" (UniqueName: \"kubernetes.io/projected/9d0f3da1-73b0-4f89-898a-4c00b2de52f5-kube-api-access-nkfpk\") pod \"9d0f3da1-73b0-4f89-898a-4c00b2de52f5\" (UID: \"9d0f3da1-73b0-4f89-898a-4c00b2de52f5\") " Nov 28 17:07:56 crc kubenswrapper[4884]: I1128 17:07:56.613322 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d0f3da1-73b0-4f89-898a-4c00b2de52f5-kube-api-access-nkfpk" (OuterVolumeSpecName: "kube-api-access-nkfpk") pod "9d0f3da1-73b0-4f89-898a-4c00b2de52f5" (UID: "9d0f3da1-73b0-4f89-898a-4c00b2de52f5"). InnerVolumeSpecName "kube-api-access-nkfpk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:07:56 crc kubenswrapper[4884]: I1128 17:07:56.703143 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkfpk\" (UniqueName: \"kubernetes.io/projected/9d0f3da1-73b0-4f89-898a-4c00b2de52f5-kube-api-access-nkfpk\") on node \"crc\" DevicePath \"\"" Nov 28 17:07:56 crc kubenswrapper[4884]: I1128 17:07:56.791389 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91907454-6e9b-481a-a322-ac1cd7d79ecd" path="/var/lib/kubelet/pods/91907454-6e9b-481a-a322-ac1cd7d79ecd/volumes" Nov 28 17:07:57 crc kubenswrapper[4884]: I1128 17:07:57.097482 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3e39-account-create-zzz2j" event={"ID":"9d0f3da1-73b0-4f89-898a-4c00b2de52f5","Type":"ContainerDied","Data":"9e1b3f5f2b448c283aaffe349cbc041fa8b506a824cdb8b4ef2032b1ec5a5774"} Nov 28 17:07:57 crc kubenswrapper[4884]: I1128 17:07:57.097531 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e1b3f5f2b448c283aaffe349cbc041fa8b506a824cdb8b4ef2032b1ec5a5774" Nov 28 17:07:57 crc kubenswrapper[4884]: I1128 17:07:57.097594 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3e39-account-create-zzz2j" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.875104 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-2b7rf"] Nov 28 17:07:58 crc kubenswrapper[4884]: E1128 17:07:58.875841 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d0f3da1-73b0-4f89-898a-4c00b2de52f5" containerName="mariadb-account-create" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.875856 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d0f3da1-73b0-4f89-898a-4c00b2de52f5" containerName="mariadb-account-create" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.876075 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d0f3da1-73b0-4f89-898a-4c00b2de52f5" containerName="mariadb-account-create" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.876822 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.880044 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.880150 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.880517 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-77nlv" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.888702 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-2b7rf"] Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.947453 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-scripts\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.947654 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5zv9\" (UniqueName: \"kubernetes.io/projected/e6ea284a-9a80-4e74-af52-1cca813bb0da-kube-api-access-p5zv9\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.947791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-config-data\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:58 crc kubenswrapper[4884]: I1128 17:07:58.947971 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-combined-ca-bundle\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.050490 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5zv9\" (UniqueName: \"kubernetes.io/projected/e6ea284a-9a80-4e74-af52-1cca813bb0da-kube-api-access-p5zv9\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.050568 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-config-data\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.050625 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-combined-ca-bundle\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.050737 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-scripts\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.057874 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-scripts\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.057900 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-combined-ca-bundle\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.058149 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-config-data\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.075039 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5zv9\" (UniqueName: \"kubernetes.io/projected/e6ea284a-9a80-4e74-af52-1cca813bb0da-kube-api-access-p5zv9\") pod \"aodh-db-sync-2b7rf\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.242882 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:07:59 crc kubenswrapper[4884]: I1128 17:07:59.729241 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-2b7rf"] Nov 28 17:07:59 crc kubenswrapper[4884]: W1128 17:07:59.731705 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6ea284a_9a80_4e74_af52_1cca813bb0da.slice/crio-b864e1f226e2157d7fe821739e55953e907f3a142c828bb8d109cf7e06d689dc WatchSource:0}: Error finding container b864e1f226e2157d7fe821739e55953e907f3a142c828bb8d109cf7e06d689dc: Status 404 returned error can't find the container with id b864e1f226e2157d7fe821739e55953e907f3a142c828bb8d109cf7e06d689dc Nov 28 17:08:00 crc kubenswrapper[4884]: I1128 17:08:00.127859 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2b7rf" event={"ID":"e6ea284a-9a80-4e74-af52-1cca813bb0da","Type":"ContainerStarted","Data":"b864e1f226e2157d7fe821739e55953e907f3a142c828bb8d109cf7e06d689dc"} Nov 28 17:08:03 crc kubenswrapper[4884]: I1128 17:08:03.511052 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 17:08:03 crc kubenswrapper[4884]: I1128 17:08:03.688863 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:08:03 crc kubenswrapper[4884]: E1128 17:08:03.689125 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:08:05 crc kubenswrapper[4884]: I1128 17:08:05.214577 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2b7rf" event={"ID":"e6ea284a-9a80-4e74-af52-1cca813bb0da","Type":"ContainerStarted","Data":"d8c50f43705b7789f5a611eb7940e5f64bd5d16b6415742fc956343c7782b38b"} Nov 28 17:08:05 crc kubenswrapper[4884]: I1128 17:08:05.234281 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-2b7rf" podStartSLOduration=2.3765501159999998 podStartE2EDuration="7.234260839s" podCreationTimestamp="2025-11-28 17:07:58 +0000 UTC" firstStartedPulling="2025-11-28 17:07:59.734506724 +0000 UTC m=+6519.297290525" lastFinishedPulling="2025-11-28 17:08:04.592217447 +0000 UTC m=+6524.155001248" observedRunningTime="2025-11-28 17:08:05.228499808 +0000 UTC m=+6524.791283619" watchObservedRunningTime="2025-11-28 17:08:05.234260839 +0000 UTC m=+6524.797044650" Nov 28 17:08:08 crc kubenswrapper[4884]: I1128 17:08:08.254166 4884 generic.go:334] "Generic (PLEG): container finished" podID="e6ea284a-9a80-4e74-af52-1cca813bb0da" containerID="d8c50f43705b7789f5a611eb7940e5f64bd5d16b6415742fc956343c7782b38b" exitCode=0 Nov 28 17:08:08 crc kubenswrapper[4884]: I1128 17:08:08.254277 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2b7rf" event={"ID":"e6ea284a-9a80-4e74-af52-1cca813bb0da","Type":"ContainerDied","Data":"d8c50f43705b7789f5a611eb7940e5f64bd5d16b6415742fc956343c7782b38b"} Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.674921 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-2b7rf" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.874286 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-scripts\") pod \"e6ea284a-9a80-4e74-af52-1cca813bb0da\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.874552 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-combined-ca-bundle\") pod \"e6ea284a-9a80-4e74-af52-1cca813bb0da\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.874610 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-config-data\") pod \"e6ea284a-9a80-4e74-af52-1cca813bb0da\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.874645 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5zv9\" (UniqueName: \"kubernetes.io/projected/e6ea284a-9a80-4e74-af52-1cca813bb0da-kube-api-access-p5zv9\") pod \"e6ea284a-9a80-4e74-af52-1cca813bb0da\" (UID: \"e6ea284a-9a80-4e74-af52-1cca813bb0da\") " Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.888402 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-scripts" (OuterVolumeSpecName: "scripts") pod "e6ea284a-9a80-4e74-af52-1cca813bb0da" (UID: "e6ea284a-9a80-4e74-af52-1cca813bb0da"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.938954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6ea284a-9a80-4e74-af52-1cca813bb0da-kube-api-access-p5zv9" (OuterVolumeSpecName: "kube-api-access-p5zv9") pod "e6ea284a-9a80-4e74-af52-1cca813bb0da" (UID: "e6ea284a-9a80-4e74-af52-1cca813bb0da"). InnerVolumeSpecName "kube-api-access-p5zv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.940191 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-config-data" (OuterVolumeSpecName: "config-data") pod "e6ea284a-9a80-4e74-af52-1cca813bb0da" (UID: "e6ea284a-9a80-4e74-af52-1cca813bb0da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.942368 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6ea284a-9a80-4e74-af52-1cca813bb0da" (UID: "e6ea284a-9a80-4e74-af52-1cca813bb0da"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.979546 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.979582 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.979592 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5zv9\" (UniqueName: \"kubernetes.io/projected/e6ea284a-9a80-4e74-af52-1cca813bb0da-kube-api-access-p5zv9\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:09 crc kubenswrapper[4884]: I1128 17:08:09.979605 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6ea284a-9a80-4e74-af52-1cca813bb0da-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:08:10 crc kubenswrapper[4884]: I1128 17:08:10.273505 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-2b7rf" event={"ID":"e6ea284a-9a80-4e74-af52-1cca813bb0da","Type":"ContainerDied","Data":"b864e1f226e2157d7fe821739e55953e907f3a142c828bb8d109cf7e06d689dc"} Nov 28 17:08:10 crc kubenswrapper[4884]: I1128 17:08:10.273554 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b864e1f226e2157d7fe821739e55953e907f3a142c828bb8d109cf7e06d689dc" Nov 28 17:08:10 crc kubenswrapper[4884]: I1128 17:08:10.273577 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-2b7rf"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.961268 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Nov 28 17:08:13 crc kubenswrapper[4884]: E1128 17:08:13.962393 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6ea284a-9a80-4e74-af52-1cca813bb0da" containerName="aodh-db-sync"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.962414 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6ea284a-9a80-4e74-af52-1cca813bb0da" containerName="aodh-db-sync"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.962734 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6ea284a-9a80-4e74-af52-1cca813bb0da" containerName="aodh-db-sync"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.965257 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.969530 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.969616 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.969952 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-77nlv"
Nov 28 17:08:13 crc kubenswrapper[4884]: I1128 17:08:13.974763 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.062349 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-scripts\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.062695 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd79w\" (UniqueName: \"kubernetes.io/projected/a27f8ed5-7890-49f1-9330-9b8c61114002-kube-api-access-rd79w\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.062930 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-config-data\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.063084 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-combined-ca-bundle\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.166582 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-scripts\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.166813 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd79w\" (UniqueName: \"kubernetes.io/projected/a27f8ed5-7890-49f1-9330-9b8c61114002-kube-api-access-rd79w\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.166915 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-config-data\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.167011 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-combined-ca-bundle\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.172976 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-config-data\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.173817 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-combined-ca-bundle\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.180575 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a27f8ed5-7890-49f1-9330-9b8c61114002-scripts\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.205968 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd79w\" (UniqueName: \"kubernetes.io/projected/a27f8ed5-7890-49f1-9330-9b8c61114002-kube-api-access-rd79w\") pod \"aodh-0\" (UID: \"a27f8ed5-7890-49f1-9330-9b8c61114002\") " pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.283166 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 28 17:08:14 crc kubenswrapper[4884]: I1128 17:08:14.836371 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 28 17:08:15 crc kubenswrapper[4884]: I1128 17:08:15.321065 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a27f8ed5-7890-49f1-9330-9b8c61114002","Type":"ContainerStarted","Data":"4e871f52dc351290b211fbe4368c6806d0139a89166a56a67eb5252ad38a18f3"}
Nov 28 17:08:15 crc kubenswrapper[4884]: I1128 17:08:15.688607 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"
Nov 28 17:08:15 crc kubenswrapper[4884]: E1128 17:08:15.689214 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 17:08:16 crc kubenswrapper[4884]: I1128 17:08:16.335447 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a27f8ed5-7890-49f1-9330-9b8c61114002","Type":"ContainerStarted","Data":"a660e9fa7fa4a11d3284562cbba482de2b18f59a35262168e69afe3ee982b8cf"}
Nov 28 17:08:16 crc kubenswrapper[4884]: I1128 17:08:16.716418 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:08:16 crc kubenswrapper[4884]: I1128 17:08:16.716732 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-central-agent" containerID="cri-o://f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13" gracePeriod=30
Nov 28 17:08:16 crc kubenswrapper[4884]: I1128 17:08:16.717064 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-notification-agent" containerID="cri-o://a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8" gracePeriod=30
Nov 28 17:08:16 crc kubenswrapper[4884]: I1128 17:08:16.716975 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="proxy-httpd" containerID="cri-o://98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc" gracePeriod=30
Nov 28 17:08:16 crc kubenswrapper[4884]: I1128 17:08:16.716962 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="sg-core" containerID="cri-o://a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d" gracePeriod=30
Nov 28 17:08:17 crc kubenswrapper[4884]: I1128 17:08:17.347753 4884 generic.go:334] "Generic (PLEG): container finished" podID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerID="98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc" exitCode=0
Nov 28 17:08:17 crc kubenswrapper[4884]: I1128 17:08:17.347786 4884 generic.go:334] "Generic (PLEG): container finished" podID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerID="a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d" exitCode=2
Nov 28 17:08:17 crc kubenswrapper[4884]: I1128 17:08:17.347795 4884 generic.go:334] "Generic (PLEG): container finished" podID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerID="f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13" exitCode=0
Nov 28 17:08:17 crc kubenswrapper[4884]: I1128 17:08:17.347814 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerDied","Data":"98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc"}
Nov 28 17:08:17 crc kubenswrapper[4884]: I1128 17:08:17.347839 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerDied","Data":"a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d"}
Nov 28 17:08:17 crc kubenswrapper[4884]: I1128 17:08:17.347849 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerDied","Data":"f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13"}
Nov 28 17:08:18 crc kubenswrapper[4884]: I1128 17:08:18.373208 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a27f8ed5-7890-49f1-9330-9b8c61114002","Type":"ContainerStarted","Data":"65fae6a9e016b88023802d7b4536c1b6918c0ffc749bde9b7f90534881753f35"}
Nov 28 17:08:19 crc kubenswrapper[4884]: I1128 17:08:19.387359 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a27f8ed5-7890-49f1-9330-9b8c61114002","Type":"ContainerStarted","Data":"acbe6d879a061fe61799f284b16b5072528bd8a0ac67c9ca5bf64c12dedb21ec"}
Nov 28 17:08:20 crc kubenswrapper[4884]: I1128 17:08:20.906493 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.023791 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-combined-ca-bundle\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.023911 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-run-httpd\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.023957 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-config-data\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.024078 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-log-httpd\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.024135 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-sg-core-conf-yaml\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.024198 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-scripts\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.024257 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sldrx\" (UniqueName: \"kubernetes.io/projected/15494225-080f-42ad-a8a5-aa0bfc2a52a6-kube-api-access-sldrx\") pod \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\" (UID: \"15494225-080f-42ad-a8a5-aa0bfc2a52a6\") "
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.024394 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.024512 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.025019 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.025038 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/15494225-080f-42ad-a8a5-aa0bfc2a52a6-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.029691 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15494225-080f-42ad-a8a5-aa0bfc2a52a6-kube-api-access-sldrx" (OuterVolumeSpecName: "kube-api-access-sldrx") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "kube-api-access-sldrx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.029944 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-scripts" (OuterVolumeSpecName: "scripts") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.052624 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.103396 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.128057 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.128103 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.128115 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sldrx\" (UniqueName: \"kubernetes.io/projected/15494225-080f-42ad-a8a5-aa0bfc2a52a6-kube-api-access-sldrx\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.128128 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.131442 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-config-data" (OuterVolumeSpecName: "config-data") pod "15494225-080f-42ad-a8a5-aa0bfc2a52a6" (UID: "15494225-080f-42ad-a8a5-aa0bfc2a52a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.230512 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15494225-080f-42ad-a8a5-aa0bfc2a52a6-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.408881 4884 generic.go:334] "Generic (PLEG): container finished" podID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerID="a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8" exitCode=0
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.408965 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerDied","Data":"a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8"}
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.409003 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"15494225-080f-42ad-a8a5-aa0bfc2a52a6","Type":"ContainerDied","Data":"7556895db1fbfe19e217085769c1bac77038ce70a783538d8e963cf4664afd6a"}
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.409015 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.409029 4884 scope.go:117] "RemoveContainer" containerID="98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.424912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"a27f8ed5-7890-49f1-9330-9b8c61114002","Type":"ContainerStarted","Data":"b747ae598d3a9b6c73f33dd365ed2d9fedf40226fa09ecf8456908c7be2fad6a"}
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.462488 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.488011 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.505405 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.856727777 podStartE2EDuration="8.505381587s" podCreationTimestamp="2025-11-28 17:08:13 +0000 UTC" firstStartedPulling="2025-11-28 17:08:14.852596168 +0000 UTC m=+6534.415379979" lastFinishedPulling="2025-11-28 17:08:20.501249988 +0000 UTC m=+6540.064033789" observedRunningTime="2025-11-28 17:08:21.462655045 +0000 UTC m=+6541.025438856" watchObservedRunningTime="2025-11-28 17:08:21.505381587 +0000 UTC m=+6541.068165388"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.508241 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.508737 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-central-agent"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.508749 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-central-agent"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.508765 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="proxy-httpd"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.508771 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="proxy-httpd"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.508788 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-notification-agent"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.508794 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-notification-agent"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.508807 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="sg-core"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.508813 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="sg-core"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.508989 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-notification-agent"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.509003 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="sg-core"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.509018 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="proxy-httpd"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.509031 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" containerName="ceilometer-central-agent"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.511189 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.518622 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.518921 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.527363 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.551209 4884 scope.go:117] "RemoveContainer" containerID="a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.603476 4884 scope.go:117] "RemoveContainer" containerID="a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.641984 4884 scope.go:117] "RemoveContainer" containerID="f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656238 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-run-httpd\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656360 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-log-httpd\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656447 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656519 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-config-data\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656556 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-scripts\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.656725 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsdlg\" (UniqueName: \"kubernetes.io/projected/8710527d-1641-4d8c-b5e8-5ebe92677006-kube-api-access-qsdlg\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.665958 4884 scope.go:117] "RemoveContainer" containerID="98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.666674 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc\": container with ID starting with 98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc not found: ID does not exist" containerID="98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.666783 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc"} err="failed to get container status \"98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc\": rpc error: code = NotFound desc = could not find container \"98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc\": container with ID starting with 98b2a9dd761a12e67f1424eadb490ba5d5212e8d64df3f6759d100ba67a5b4cc not found: ID does not exist"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.666874 4884 scope.go:117] "RemoveContainer" containerID="a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.667302 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d\": container with ID starting with a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d not found: ID does not exist" containerID="a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.667969 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d"} err="failed to get container status \"a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d\": rpc error: code = NotFound desc = could not find container \"a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d\": container with ID starting with a0c37bfea0b25bdca90a15b418a6f97d6e33bcf38e55511db0adc5efcec9476d not found: ID does not exist"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.668078 4884 scope.go:117] "RemoveContainer" containerID="a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.668758 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8\": container with ID starting with a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8 not found: ID does not exist" containerID="a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.668801 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8"} err="failed to get container status \"a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8\": rpc error: code = NotFound desc = could not find container \"a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8\": container with ID starting with a22ba1518219bf0e8688b1286951d97ba97c89f2f5840763a432feb30fa431b8 not found: ID does not exist"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.668828 4884 scope.go:117] "RemoveContainer" containerID="f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13"
Nov 28 17:08:21 crc kubenswrapper[4884]: E1128 17:08:21.669377 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13\": container with ID starting with f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13 not found: ID does not exist" containerID="f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.669486 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13"} err="failed to get container status \"f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13\": rpc error: code = NotFound desc = could not find container \"f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13\": container with ID starting with f32e2dcfbfd075e396991296f974161a858cfff14a2139982b39fb0cc37c9a13 not found: ID does not exist"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.758819 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-config-data\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.759146 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.759325 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-scripts\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.759431 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsdlg\" (UniqueName: \"kubernetes.io/projected/8710527d-1641-4d8c-b5e8-5ebe92677006-kube-api-access-qsdlg\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.759529 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-run-httpd\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.759619 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-log-httpd\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.759728 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.760169 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-run-httpd\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.760183 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-log-httpd\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.763672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.763708 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-scripts\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.764232 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.764336 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-config-data\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.784103 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsdlg\" (UniqueName: \"kubernetes.io/projected/8710527d-1641-4d8c-b5e8-5ebe92677006-kube-api-access-qsdlg\") pod \"ceilometer-0\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " pod="openstack/ceilometer-0"
Nov 28 17:08:21 crc kubenswrapper[4884]: I1128 17:08:21.900763 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 17:08:22 crc kubenswrapper[4884]: I1128 17:08:22.417744 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:08:22 crc kubenswrapper[4884]: W1128 17:08:22.427501 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8710527d_1641_4d8c_b5e8_5ebe92677006.slice/crio-b8e5dbb862d45249b27fe14ae984bbccc935c379f5da0e8f3e6b245e62fbadf1 WatchSource:0}: Error finding container b8e5dbb862d45249b27fe14ae984bbccc935c379f5da0e8f3e6b245e62fbadf1: Status 404 returned error can't find the container with id b8e5dbb862d45249b27fe14ae984bbccc935c379f5da0e8f3e6b245e62fbadf1
Nov 28 17:08:22 crc kubenswrapper[4884]: I1128 17:08:22.440336 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerStarted","Data":"b8e5dbb862d45249b27fe14ae984bbccc935c379f5da0e8f3e6b245e62fbadf1"}
Nov 28 17:08:22 crc kubenswrapper[4884]: I1128 17:08:22.729042 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15494225-080f-42ad-a8a5-aa0bfc2a52a6" path="/var/lib/kubelet/pods/15494225-080f-42ad-a8a5-aa0bfc2a52a6/volumes"
Nov 28 17:08:23 crc kubenswrapper[4884]: I1128 17:08:23.465666 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerStarted","Data":"b2f075106ba725c325afad128e17800191dd2cc8e2ae664c86f50f6f36652010"}
Nov 28 17:08:24 crc kubenswrapper[4884]: I1128 17:08:24.477183 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerStarted","Data":"5f7e3787e7104ffc5911bafaf0bf06e955fdde5ea2a18b9dba242af3416c73b9"}
Nov 28 17:08:25 crc kubenswrapper[4884]: I1128 17:08:25.490556 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerStarted","Data":"1e0cef010a7066c32448d1ded41921320a4daddccc6a3858798f614a47aea74b"}
Nov 28 17:08:27 crc kubenswrapper[4884]: I1128 17:08:27.521317 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerStarted","Data":"cccaeb6f948ecd1135f6da8d601c705581e047487aba53889c93aaf3130a90a3"}
Nov 28 17:08:27 crc kubenswrapper[4884]: I1128 17:08:27.522737 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 17:08:27 crc kubenswrapper[4884]: I1128 17:08:27.553811 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.591802466 podStartE2EDuration="6.553793128s" podCreationTimestamp="2025-11-28 17:08:21 +0000 UTC" firstStartedPulling="2025-11-28 17:08:22.429813634 +0000 UTC m=+6541.992597435" lastFinishedPulling="2025-11-28 17:08:26.391804296 +0000 UTC m=+6545.954588097" observedRunningTime="2025-11-28 17:08:27.544848857 +0000 UTC m=+6547.107632668" watchObservedRunningTime="2025-11-28 17:08:27.553793128 +0000 UTC m=+6547.116576929"
Nov 28 17:08:27 crc kubenswrapper[4884]: I1128 17:08:27.991436 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-vskr8"]
Nov 28 17:08:27 crc kubenswrapper[4884]: I1128 17:08:27.993393 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:28 crc kubenswrapper[4884]: I1128 17:08:28.002964 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-vskr8"]
Nov 28 17:08:28 crc kubenswrapper[4884]: I1128 17:08:28.118465 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcgrh\" (UniqueName: \"kubernetes.io/projected/39b37ab6-a0b0-40f6-9e11-72c135a17ed9-kube-api-access-kcgrh\") pod \"manila-db-create-vskr8\" (UID: \"39b37ab6-a0b0-40f6-9e11-72c135a17ed9\") " pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:28 crc kubenswrapper[4884]: I1128 17:08:28.220582 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcgrh\" (UniqueName: \"kubernetes.io/projected/39b37ab6-a0b0-40f6-9e11-72c135a17ed9-kube-api-access-kcgrh\") pod \"manila-db-create-vskr8\" (UID: \"39b37ab6-a0b0-40f6-9e11-72c135a17ed9\") " pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:28 crc kubenswrapper[4884]: I1128 17:08:28.241275 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcgrh\" (UniqueName: \"kubernetes.io/projected/39b37ab6-a0b0-40f6-9e11-72c135a17ed9-kube-api-access-kcgrh\") pod \"manila-db-create-vskr8\" (UID: \"39b37ab6-a0b0-40f6-9e11-72c135a17ed9\") " pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:28 crc kubenswrapper[4884]: I1128 17:08:28.350480 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:28 crc kubenswrapper[4884]: I1128 17:08:28.976301 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-vskr8"]
Nov 28 17:08:29 crc kubenswrapper[4884]: W1128 17:08:29.020559 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39b37ab6_a0b0_40f6_9e11_72c135a17ed9.slice/crio-669944bb1b856722b65825a23cb8bf8fd54ebb59b44e44ff973bb755805b9e59 WatchSource:0}: Error finding container 669944bb1b856722b65825a23cb8bf8fd54ebb59b44e44ff973bb755805b9e59: Status 404 returned error can't find the container with id 669944bb1b856722b65825a23cb8bf8fd54ebb59b44e44ff973bb755805b9e59
Nov 28 17:08:29 crc kubenswrapper[4884]: I1128 17:08:29.544275 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-vskr8" event={"ID":"39b37ab6-a0b0-40f6-9e11-72c135a17ed9","Type":"ContainerStarted","Data":"e0bd2370da465b7d17abca4d491f69548acb5e329c2d4e7a86cf6c878c6a3eb5"}
Nov 28 17:08:29 crc kubenswrapper[4884]: I1128 17:08:29.544636 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-vskr8" event={"ID":"39b37ab6-a0b0-40f6-9e11-72c135a17ed9","Type":"ContainerStarted","Data":"669944bb1b856722b65825a23cb8bf8fd54ebb59b44e44ff973bb755805b9e59"}
Nov 28 17:08:29 crc kubenswrapper[4884]: I1128 17:08:29.690566 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"
Nov 28 17:08:29 crc kubenswrapper[4884]: E1128 17:08:29.690825 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 17:08:30 crc kubenswrapper[4884]: I1128 17:08:30.557998 4884 generic.go:334] "Generic (PLEG): container finished" podID="39b37ab6-a0b0-40f6-9e11-72c135a17ed9" containerID="e0bd2370da465b7d17abca4d491f69548acb5e329c2d4e7a86cf6c878c6a3eb5" exitCode=0
Nov 28 17:08:30 crc kubenswrapper[4884]: I1128 17:08:30.558079 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-vskr8" event={"ID":"39b37ab6-a0b0-40f6-9e11-72c135a17ed9","Type":"ContainerDied","Data":"e0bd2370da465b7d17abca4d491f69548acb5e329c2d4e7a86cf6c878c6a3eb5"}
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.054385 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.112820 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcgrh\" (UniqueName: \"kubernetes.io/projected/39b37ab6-a0b0-40f6-9e11-72c135a17ed9-kube-api-access-kcgrh\") pod \"39b37ab6-a0b0-40f6-9e11-72c135a17ed9\" (UID: \"39b37ab6-a0b0-40f6-9e11-72c135a17ed9\") "
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.119182 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39b37ab6-a0b0-40f6-9e11-72c135a17ed9-kube-api-access-kcgrh" (OuterVolumeSpecName: "kube-api-access-kcgrh") pod "39b37ab6-a0b0-40f6-9e11-72c135a17ed9" (UID: "39b37ab6-a0b0-40f6-9e11-72c135a17ed9"). InnerVolumeSpecName "kube-api-access-kcgrh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.215482 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcgrh\" (UniqueName: \"kubernetes.io/projected/39b37ab6-a0b0-40f6-9e11-72c135a17ed9-kube-api-access-kcgrh\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.585647 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-vskr8" event={"ID":"39b37ab6-a0b0-40f6-9e11-72c135a17ed9","Type":"ContainerDied","Data":"669944bb1b856722b65825a23cb8bf8fd54ebb59b44e44ff973bb755805b9e59"}
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.585849 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="669944bb1b856722b65825a23cb8bf8fd54ebb59b44e44ff973bb755805b9e59"
Nov 28 17:08:32 crc kubenswrapper[4884]: I1128 17:08:32.585743 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-vskr8"
Nov 28 17:08:35 crc kubenswrapper[4884]: I1128 17:08:35.348353 4884 scope.go:117] "RemoveContainer" containerID="a2faa47b1e3aa901f2b2fe29590a0341f6369f79fd5dcb4b3af6858ff28311ec"
Nov 28 17:08:35 crc kubenswrapper[4884]: I1128 17:08:35.394621 4884 scope.go:117] "RemoveContainer" containerID="fc20a08b5191c7333e5a3e858e4d4d8a582b962b059464136ee62f3a10659758"
Nov 28 17:08:35 crc kubenswrapper[4884]: I1128 17:08:35.436169 4884 scope.go:117] "RemoveContainer" containerID="8ce412ae0f0c16f54c8c804e892361de64e404155e36f25b0aec81f4ac522b5e"
Nov 28 17:08:43 crc kubenswrapper[4884]: I1128 17:08:43.689146 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"
Nov 28 17:08:43 crc kubenswrapper[4884]: E1128 17:08:43.689926 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.030866 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-2c0c-account-create-lxc5k"]
Nov 28 17:08:48 crc kubenswrapper[4884]: E1128 17:08:48.032811 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39b37ab6-a0b0-40f6-9e11-72c135a17ed9" containerName="mariadb-database-create"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.032850 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="39b37ab6-a0b0-40f6-9e11-72c135a17ed9" containerName="mariadb-database-create"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.033064 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="39b37ab6-a0b0-40f6-9e11-72c135a17ed9" containerName="mariadb-database-create"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.034033 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.042478 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.044115 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-2c0c-account-create-lxc5k"]
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.160666 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mxw7\" (UniqueName: \"kubernetes.io/projected/241df297-4724-41e5-b16d-8399fa93d3ee-kube-api-access-5mxw7\") pod \"manila-2c0c-account-create-lxc5k\" (UID: \"241df297-4724-41e5-b16d-8399fa93d3ee\") " pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.263803 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mxw7\" (UniqueName: \"kubernetes.io/projected/241df297-4724-41e5-b16d-8399fa93d3ee-kube-api-access-5mxw7\") pod \"manila-2c0c-account-create-lxc5k\" (UID: \"241df297-4724-41e5-b16d-8399fa93d3ee\") " pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.287850 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mxw7\" (UniqueName: \"kubernetes.io/projected/241df297-4724-41e5-b16d-8399fa93d3ee-kube-api-access-5mxw7\") pod \"manila-2c0c-account-create-lxc5k\" (UID: \"241df297-4724-41e5-b16d-8399fa93d3ee\") " pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.362897 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:48 crc kubenswrapper[4884]: I1128 17:08:48.913512 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-2c0c-account-create-lxc5k"]
Nov 28 17:08:49 crc kubenswrapper[4884]: I1128 17:08:49.751859 4884 generic.go:334] "Generic (PLEG): container finished" podID="241df297-4724-41e5-b16d-8399fa93d3ee" containerID="bbe8319175ab437e27d9d549737b901d079bd54ae2d038514e5f968717370d07" exitCode=0
Nov 28 17:08:49 crc kubenswrapper[4884]: I1128 17:08:49.751913 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c0c-account-create-lxc5k" event={"ID":"241df297-4724-41e5-b16d-8399fa93d3ee","Type":"ContainerDied","Data":"bbe8319175ab437e27d9d549737b901d079bd54ae2d038514e5f968717370d07"}
Nov 28 17:08:49 crc kubenswrapper[4884]: I1128 17:08:49.752219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c0c-account-create-lxc5k" event={"ID":"241df297-4724-41e5-b16d-8399fa93d3ee","Type":"ContainerStarted","Data":"8b88a270019ff1ca7b48680fe14c60ffe947b5905a504ca7b1da7a171433ecf5"}
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.206208 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.332640 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mxw7\" (UniqueName: \"kubernetes.io/projected/241df297-4724-41e5-b16d-8399fa93d3ee-kube-api-access-5mxw7\") pod \"241df297-4724-41e5-b16d-8399fa93d3ee\" (UID: \"241df297-4724-41e5-b16d-8399fa93d3ee\") "
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.348793 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/241df297-4724-41e5-b16d-8399fa93d3ee-kube-api-access-5mxw7" (OuterVolumeSpecName: "kube-api-access-5mxw7") pod "241df297-4724-41e5-b16d-8399fa93d3ee" (UID: "241df297-4724-41e5-b16d-8399fa93d3ee"). InnerVolumeSpecName "kube-api-access-5mxw7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.436182 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mxw7\" (UniqueName: \"kubernetes.io/projected/241df297-4724-41e5-b16d-8399fa93d3ee-kube-api-access-5mxw7\") on node \"crc\" DevicePath \"\""
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.773989 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-2c0c-account-create-lxc5k" event={"ID":"241df297-4724-41e5-b16d-8399fa93d3ee","Type":"ContainerDied","Data":"8b88a270019ff1ca7b48680fe14c60ffe947b5905a504ca7b1da7a171433ecf5"}
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.774284 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b88a270019ff1ca7b48680fe14c60ffe947b5905a504ca7b1da7a171433ecf5"
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.774061 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-2c0c-account-create-lxc5k"
Nov 28 17:08:51 crc kubenswrapper[4884]: I1128 17:08:51.907402 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.294962 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-q4rsb"]
Nov 28 17:08:53 crc kubenswrapper[4884]: E1128 17:08:53.296475 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="241df297-4724-41e5-b16d-8399fa93d3ee" containerName="mariadb-account-create"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.296590 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="241df297-4724-41e5-b16d-8399fa93d3ee" containerName="mariadb-account-create"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.296859 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="241df297-4724-41e5-b16d-8399fa93d3ee" containerName="mariadb-account-create"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.297831 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.301227 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.301452 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-2td6s"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.312358 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-q4rsb"]
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.379155 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-config-data\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.379317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc6q4\" (UniqueName: \"kubernetes.io/projected/47b54110-28ef-40c8-ab06-9be4e188f54f-kube-api-access-jc6q4\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.379522 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-job-config-data\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.379576 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-combined-ca-bundle\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.482003 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-config-data\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.482184 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc6q4\" (UniqueName: \"kubernetes.io/projected/47b54110-28ef-40c8-ab06-9be4e188f54f-kube-api-access-jc6q4\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.482297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-job-config-data\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.482360 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-combined-ca-bundle\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.487332 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-job-config-data\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.487610 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-combined-ca-bundle\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.487868 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-config-data\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.501133 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc6q4\" (UniqueName: \"kubernetes.io/projected/47b54110-28ef-40c8-ab06-9be4e188f54f-kube-api-access-jc6q4\") pod \"manila-db-sync-q4rsb\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") " pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:53 crc kubenswrapper[4884]: I1128 17:08:53.660659 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:08:54 crc kubenswrapper[4884]: I1128 17:08:54.654684 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-q4rsb"]
Nov 28 17:08:54 crc kubenswrapper[4884]: I1128 17:08:54.837541 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-q4rsb" event={"ID":"47b54110-28ef-40c8-ab06-9be4e188f54f","Type":"ContainerStarted","Data":"6896b397926b7c9053fad3fad3690d52f9fd92f873a0c08389461e2cff5d5fe0"}
Nov 28 17:08:55 crc kubenswrapper[4884]: I1128 17:08:55.688402 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2"
Nov 28 17:08:55 crc kubenswrapper[4884]: E1128 17:08:55.689277 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 17:08:59 crc kubenswrapper[4884]: I1128 17:08:59.920357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-q4rsb" event={"ID":"47b54110-28ef-40c8-ab06-9be4e188f54f","Type":"ContainerStarted","Data":"6c0d8b7ceda2febdb3de715a6b072849dbddb5e9bd2bfe128341ba580f63c325"}
Nov 28 17:08:59 crc kubenswrapper[4884]: I1128 17:08:59.950133 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-q4rsb" podStartSLOduration=2.1821956240000002 podStartE2EDuration="6.95011116s" podCreationTimestamp="2025-11-28 17:08:53 +0000 UTC" firstStartedPulling="2025-11-28 17:08:54.664008028 +0000 UTC m=+6574.226791829" lastFinishedPulling="2025-11-28 17:08:59.431923544 +0000 UTC m=+6578.994707365" observedRunningTime="2025-11-28 17:08:59.936741611 +0000 UTC m=+6579.499525402" watchObservedRunningTime="2025-11-28 17:08:59.95011116 +0000 UTC m=+6579.512894971"
Nov 28 17:09:01 crc kubenswrapper[4884]: I1128 17:09:01.948465 4884 generic.go:334] "Generic (PLEG): container finished" podID="47b54110-28ef-40c8-ab06-9be4e188f54f" containerID="6c0d8b7ceda2febdb3de715a6b072849dbddb5e9bd2bfe128341ba580f63c325" exitCode=0
Nov 28 17:09:01 crc kubenswrapper[4884]: I1128 17:09:01.948616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-q4rsb" event={"ID":"47b54110-28ef-40c8-ab06-9be4e188f54f","Type":"ContainerDied","Data":"6c0d8b7ceda2febdb3de715a6b072849dbddb5e9bd2bfe128341ba580f63c325"}
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.433737 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-q4rsb"
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.510039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jc6q4\" (UniqueName: \"kubernetes.io/projected/47b54110-28ef-40c8-ab06-9be4e188f54f-kube-api-access-jc6q4\") pod \"47b54110-28ef-40c8-ab06-9be4e188f54f\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") "
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.510422 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-combined-ca-bundle\") pod \"47b54110-28ef-40c8-ab06-9be4e188f54f\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") "
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.510511 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-job-config-data\") pod \"47b54110-28ef-40c8-ab06-9be4e188f54f\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") "
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.510632 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-config-data\") pod \"47b54110-28ef-40c8-ab06-9be4e188f54f\" (UID: \"47b54110-28ef-40c8-ab06-9be4e188f54f\") "
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.515680 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "47b54110-28ef-40c8-ab06-9be4e188f54f" (UID: "47b54110-28ef-40c8-ab06-9be4e188f54f"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.516450 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47b54110-28ef-40c8-ab06-9be4e188f54f-kube-api-access-jc6q4" (OuterVolumeSpecName: "kube-api-access-jc6q4") pod "47b54110-28ef-40c8-ab06-9be4e188f54f" (UID: "47b54110-28ef-40c8-ab06-9be4e188f54f"). InnerVolumeSpecName "kube-api-access-jc6q4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.534251 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-config-data" (OuterVolumeSpecName: "config-data") pod "47b54110-28ef-40c8-ab06-9be4e188f54f" (UID: "47b54110-28ef-40c8-ab06-9be4e188f54f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.551056 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47b54110-28ef-40c8-ab06-9be4e188f54f" (UID: "47b54110-28ef-40c8-ab06-9be4e188f54f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.612604 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.612640 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jc6q4\" (UniqueName: \"kubernetes.io/projected/47b54110-28ef-40c8-ab06-9be4e188f54f-kube-api-access-jc6q4\") on node \"crc\" DevicePath \"\""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.612651 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.612659 4884 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/47b54110-28ef-40c8-ab06-9be4e188f54f-job-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.973600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-q4rsb" event={"ID":"47b54110-28ef-40c8-ab06-9be4e188f54f","Type":"ContainerDied","Data":"6896b397926b7c9053fad3fad3690d52f9fd92f873a0c08389461e2cff5d5fe0"}
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.973656 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6896b397926b7c9053fad3fad3690d52f9fd92f873a0c08389461e2cff5d5fe0"
Nov 28 17:09:03 crc kubenswrapper[4884]: I1128 17:09:03.973720 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-q4rsb" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.425543 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 17:09:04 crc kubenswrapper[4884]: E1128 17:09:04.426545 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47b54110-28ef-40c8-ab06-9be4e188f54f" containerName="manila-db-sync" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.426569 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="47b54110-28ef-40c8-ab06-9be4e188f54f" containerName="manila-db-sync" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.426852 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="47b54110-28ef-40c8-ab06-9be4e188f54f" containerName="manila-db-sync" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.428329 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.435761 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.435843 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.435773 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-2td6s" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.436192 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.478144 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.480195 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.485540 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.509057 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542366 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4qh6\" (UniqueName: \"kubernetes.io/projected/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-kube-api-access-q4qh6\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542470 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542508 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542555 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542590 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542621 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542644 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4frk\" (UniqueName: \"kubernetes.io/projected/0e04e709-fc63-46e0-9659-7907d2af5dc6-kube-api-access-n4frk\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542682 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " 
pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542731 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-config-data\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542765 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-config-data\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542805 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e04e709-fc63-46e0-9659-7907d2af5dc6-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542862 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-scripts\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-ceph\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.542963 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-scripts\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.545560 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.611229 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d4bd768c7-j8zqz"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.612969 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644548 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644654 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4frk\" (UniqueName: \"kubernetes.io/projected/0e04e709-fc63-46e0-9659-7907d2af5dc6-kube-api-access-n4frk\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644688 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-config\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644745 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644765 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644790 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9ssk\" (UniqueName: \"kubernetes.io/projected/9ce738ae-a3e7-43e9-807f-f9958db5285b-kube-api-access-m9ssk\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644820 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-config-data\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 
crc kubenswrapper[4884]: I1128 17:09:04.644846 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-config-data\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644879 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e04e709-fc63-46e0-9659-7907d2af5dc6-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644900 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-dns-svc\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644947 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-scripts\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.644991 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-ceph\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.645036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-scripts\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.645101 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4qh6\" (UniqueName: \"kubernetes.io/projected/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-kube-api-access-q4qh6\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.645124 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-nb\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.645160 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.645193 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.645293 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.647230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0e04e709-fc63-46e0-9659-7907d2af5dc6-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.648028 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.670610 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-config-data\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.679745 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.681572 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-scripts\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.685827 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-config-data\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.688396 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.691021 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4qh6\" (UniqueName: \"kubernetes.io/projected/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-kube-api-access-q4qh6\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.695654 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e04e709-fc63-46e0-9659-7907d2af5dc6-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.721239 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-ceph\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.721504 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-scripts\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.721648 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/02299b28-a3d4-47f3-8b14-7bc4bd0ebc53-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53\") " pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.723211 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4frk\" (UniqueName: \"kubernetes.io/projected/0e04e709-fc63-46e0-9659-7907d2af5dc6-kube-api-access-n4frk\") pod \"manila-scheduler-0\" (UID: \"0e04e709-fc63-46e0-9659-7907d2af5dc6\") " pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.749280 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-dns-svc\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.749458 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-nb\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.749577 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-config\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.749598 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.749616 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9ssk\" (UniqueName: \"kubernetes.io/projected/9ce738ae-a3e7-43e9-807f-f9958db5285b-kube-api-access-m9ssk\") pod 
\"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.751979 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.752694 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-nb\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.753418 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d4bd768c7-j8zqz"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.758623 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-dns-svc\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.759854 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.760006 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-config\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.766169 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.767978 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.775400 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.798708 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.825747 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.847147 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9ssk\" (UniqueName: \"kubernetes.io/projected/9ce738ae-a3e7-43e9-807f-f9958db5285b-kube-api-access-m9ssk\") pod \"dnsmasq-dns-d4bd768c7-j8zqz\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.851458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rc9z\" (UniqueName: \"kubernetes.io/projected/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-kube-api-access-5rc9z\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.851535 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-scripts\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.852952 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-config-data\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.853699 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-logs\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.853894 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-config-data-custom\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.854201 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-etc-machine-id\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.854466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.943753 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.955885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-scripts\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.955948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-config-data\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.955979 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-logs\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.956024 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-config-data-custom\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.956071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-etc-machine-id\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.956164 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.956187 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rc9z\" (UniqueName: \"kubernetes.io/projected/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-kube-api-access-5rc9z\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.958204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-etc-machine-id\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.958999 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-logs\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.964739 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-config-data-custom\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " 
pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.965365 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-scripts\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.965792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-config-data\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.968664 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:04 crc kubenswrapper[4884]: I1128 17:09:04.978756 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rc9z\" (UniqueName: \"kubernetes.io/projected/086b0687-4bf9-4a0c-abad-d7bff8dbedc3-kube-api-access-5rc9z\") pod \"manila-api-0\" (UID: \"086b0687-4bf9-4a0c-abad-d7bff8dbedc3\") " pod="openstack/manila-api-0" Nov 28 17:09:05 crc kubenswrapper[4884]: I1128 17:09:05.015983 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 28 17:09:05 crc kubenswrapper[4884]: I1128 17:09:05.844356 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 28 17:09:05 crc kubenswrapper[4884]: I1128 17:09:05.898124 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 28 17:09:06 crc kubenswrapper[4884]: I1128 17:09:06.002044 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"0e04e709-fc63-46e0-9659-7907d2af5dc6","Type":"ContainerStarted","Data":"6ae932be3d81c24085058c6dfa6af1b6df9d93c7c343abefd9f0fc795f305318"} Nov 28 17:09:06 crc kubenswrapper[4884]: I1128 17:09:06.008866 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53","Type":"ContainerStarted","Data":"1697b43b3d4b16ee0fe770c50df83a83ebe423835cd568879767afd42a53614d"} Nov 28 17:09:06 crc kubenswrapper[4884]: I1128 17:09:06.413777 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d4bd768c7-j8zqz"] Nov 28 17:09:06 crc kubenswrapper[4884]: W1128 17:09:06.449824 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ce738ae_a3e7_43e9_807f_f9958db5285b.slice/crio-13aff5975ceb060304b3112b928b6ac596e1f2a315cc53448c43757c40e0971b WatchSource:0}: Error finding container 13aff5975ceb060304b3112b928b6ac596e1f2a315cc53448c43757c40e0971b: Status 404 returned error can't find the container with id 13aff5975ceb060304b3112b928b6ac596e1f2a315cc53448c43757c40e0971b Nov 28 17:09:06 crc kubenswrapper[4884]: I1128 17:09:06.533864 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 28 17:09:07 crc kubenswrapper[4884]: I1128 17:09:07.020422 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" 
event={"ID":"086b0687-4bf9-4a0c-abad-d7bff8dbedc3","Type":"ContainerStarted","Data":"7e5a0d1511da2247aa90d4c9b10f4f906e2c01ebe9fed7e482e207f37578e881"} Nov 28 17:09:07 crc kubenswrapper[4884]: I1128 17:09:07.022728 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" event={"ID":"9ce738ae-a3e7-43e9-807f-f9958db5285b","Type":"ContainerStarted","Data":"13aff5975ceb060304b3112b928b6ac596e1f2a315cc53448c43757c40e0971b"} Nov 28 17:09:08 crc kubenswrapper[4884]: I1128 17:09:08.041324 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"0e04e709-fc63-46e0-9659-7907d2af5dc6","Type":"ContainerStarted","Data":"89d0dfca609505e19fb6c2d488d403ba044bc0fec55e6a6305386480e5d64edc"} Nov 28 17:09:08 crc kubenswrapper[4884]: I1128 17:09:08.042612 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"086b0687-4bf9-4a0c-abad-d7bff8dbedc3","Type":"ContainerStarted","Data":"370203c880399bfdea1491554cf8c28f511fea40349fcf4a57af51b871a0efe8"} Nov 28 17:09:08 crc kubenswrapper[4884]: I1128 17:09:08.044027 4884 generic.go:334] "Generic (PLEG): container finished" podID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerID="157f219657269804548696e47f3d7a47b90df1b11d0f012d7122bcb78228f1d6" exitCode=0 Nov 28 17:09:08 crc kubenswrapper[4884]: I1128 17:09:08.044056 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" event={"ID":"9ce738ae-a3e7-43e9-807f-f9958db5285b","Type":"ContainerDied","Data":"157f219657269804548696e47f3d7a47b90df1b11d0f012d7122bcb78228f1d6"} Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.060572 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"086b0687-4bf9-4a0c-abad-d7bff8dbedc3","Type":"ContainerStarted","Data":"3464f3612166f6a8e88cf3c17268fcd3856dc70531f2c1f6892e09acd56ccb91"} Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.061119 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.064574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" event={"ID":"9ce738ae-a3e7-43e9-807f-f9958db5285b","Type":"ContainerStarted","Data":"00f3c8eccd33dfd89a0a6b0162595ff988b382176230dd266b653e08a4965570"} Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.064639 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.071378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"0e04e709-fc63-46e0-9659-7907d2af5dc6","Type":"ContainerStarted","Data":"9657f0e0b5ef46be1dfb506d8fb194b8b2fdc73e790442a4a018319a42372a16"} Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.083907 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=5.083887822 podStartE2EDuration="5.083887822s" podCreationTimestamp="2025-11-28 17:09:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:09:09.081138084 +0000 UTC m=+6588.643921885" watchObservedRunningTime="2025-11-28 17:09:09.083887822 +0000 UTC m=+6588.646671623" Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.118721 4884 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=4.319719256 podStartE2EDuration="5.118702978s" podCreationTimestamp="2025-11-28 17:09:04 +0000 UTC" firstStartedPulling="2025-11-28 17:09:05.878255524 +0000 UTC m=+6585.441039325" lastFinishedPulling="2025-11-28 17:09:06.677239236 +0000 UTC m=+6586.240023047" observedRunningTime="2025-11-28 17:09:09.109513832 +0000 UTC m=+6588.672297633" watchObservedRunningTime="2025-11-28 17:09:09.118702978 +0000 UTC m=+6588.681486769" Nov 28 17:09:09 crc kubenswrapper[4884]: I1128 17:09:09.142811 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" podStartSLOduration=5.14278963 podStartE2EDuration="5.14278963s" podCreationTimestamp="2025-11-28 17:09:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:09:09.131678427 +0000 UTC m=+6588.694462258" watchObservedRunningTime="2025-11-28 17:09:09.14278963 +0000 UTC m=+6588.705573431" Nov 28 17:09:10 crc kubenswrapper[4884]: I1128 17:09:10.696443 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:09:10 crc kubenswrapper[4884]: E1128 17:09:10.697244 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:09:14 crc kubenswrapper[4884]: I1128 17:09:14.826944 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 28 17:09:14 crc kubenswrapper[4884]: I1128 17:09:14.946692 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.024937 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-554cf9d48c-cltfv"] Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.025212 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" podUID="c104c909-309e-4223-9d7e-4219963e167e" containerName="dnsmasq-dns" containerID="cri-o://7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c" gracePeriod=10 Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.144812 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53","Type":"ContainerStarted","Data":"32ecd39774f5fa33639603ad97c879e7c57e6d335e40381a5d848b262b960829"} Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.549369 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.641432 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-nb\") pod \"c104c909-309e-4223-9d7e-4219963e167e\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.641556 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-sb\") pod \"c104c909-309e-4223-9d7e-4219963e167e\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.641849 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjxgj\" (UniqueName: \"kubernetes.io/projected/c104c909-309e-4223-9d7e-4219963e167e-kube-api-access-xjxgj\") pod \"c104c909-309e-4223-9d7e-4219963e167e\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.641888 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-dns-svc\") pod \"c104c909-309e-4223-9d7e-4219963e167e\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.641936 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-config\") pod \"c104c909-309e-4223-9d7e-4219963e167e\" (UID: \"c104c909-309e-4223-9d7e-4219963e167e\") " Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.654572 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c104c909-309e-4223-9d7e-4219963e167e-kube-api-access-xjxgj" (OuterVolumeSpecName: "kube-api-access-xjxgj") pod "c104c909-309e-4223-9d7e-4219963e167e" (UID: "c104c909-309e-4223-9d7e-4219963e167e"). InnerVolumeSpecName "kube-api-access-xjxgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.712792 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c104c909-309e-4223-9d7e-4219963e167e" (UID: "c104c909-309e-4223-9d7e-4219963e167e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.713239 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-config" (OuterVolumeSpecName: "config") pod "c104c909-309e-4223-9d7e-4219963e167e" (UID: "c104c909-309e-4223-9d7e-4219963e167e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.715805 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c104c909-309e-4223-9d7e-4219963e167e" (UID: "c104c909-309e-4223-9d7e-4219963e167e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.727953 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c104c909-309e-4223-9d7e-4219963e167e" (UID: "c104c909-309e-4223-9d7e-4219963e167e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.744650 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjxgj\" (UniqueName: \"kubernetes.io/projected/c104c909-309e-4223-9d7e-4219963e167e-kube-api-access-xjxgj\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.744684 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.744697 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.746626 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:15 crc kubenswrapper[4884]: I1128 17:09:15.746653 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c104c909-309e-4223-9d7e-4219963e167e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.155704 4884 generic.go:334] "Generic (PLEG): container finished" podID="c104c909-309e-4223-9d7e-4219963e167e" containerID="7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c" exitCode=0 Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.155764 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.155770 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" event={"ID":"c104c909-309e-4223-9d7e-4219963e167e","Type":"ContainerDied","Data":"7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c"} Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.155892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-554cf9d48c-cltfv" event={"ID":"c104c909-309e-4223-9d7e-4219963e167e","Type":"ContainerDied","Data":"a0aaa50c58a8c6a34158e30087dd913427fffcdac1913e60a7dfbc608c7a56cc"} Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.155950 4884 scope.go:117] "RemoveContainer" containerID="7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c" Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.157907 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"02299b28-a3d4-47f3-8b14-7bc4bd0ebc53","Type":"ContainerStarted","Data":"5a3142ef8f2b0848ea16ad1cfc728e91e7df0dcf117dfb43843faa68e6e4ab28"} Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.195502 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.575368626 podStartE2EDuration="12.195482823s" podCreationTimestamp="2025-11-28 17:09:04 +0000 UTC" firstStartedPulling="2025-11-28 17:09:05.856934239 +0000 UTC m=+6585.419718040" lastFinishedPulling="2025-11-28 17:09:14.477048426 +0000 UTC m=+6594.039832237" observedRunningTime="2025-11-28 17:09:16.185550419 +0000 UTC m=+6595.748334220" watchObservedRunningTime="2025-11-28 17:09:16.195482823 +0000 UTC m=+6595.758266624" Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.202811 4884 scope.go:117] "RemoveContainer" containerID="4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0" Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.214415 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-554cf9d48c-cltfv"] Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.226243 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-554cf9d48c-cltfv"] Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.252552 4884 scope.go:117] "RemoveContainer" containerID="7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c" Nov 28 17:09:16 crc kubenswrapper[4884]: E1128 17:09:16.252998 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c\": container with ID starting with 7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c not found: ID does not exist" containerID="7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c" Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.253050 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c"} err="failed to get container status \"7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c\": rpc error: code = NotFound desc = could not find container \"7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c\": container with ID starting with 7fafa51ec4f3fe1620cb1743fb440b2876ac06fe9334af488da93e8aff1fff0c not found: ID does not exist" Nov 28 
Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.253083 4884 scope.go:117] "RemoveContainer" containerID="4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0"
Nov 28 17:09:16 crc kubenswrapper[4884]: E1128 17:09:16.254163 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0\": container with ID starting with 4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0 not found: ID does not exist" containerID="4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0"
Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.254202 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0"} err="failed to get container status \"4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0\": rpc error: code = NotFound desc = could not find container \"4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0\": container with ID starting with 4ffdb7fe948ca69da6904aa3e2458453bbf61c867f2b9c78e9314ee07e3fbba0 not found: ID does not exist"
Nov 28 17:09:16 crc kubenswrapper[4884]: I1128 17:09:16.704662 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c104c909-309e-4223-9d7e-4219963e167e" path="/var/lib/kubelet/pods/c104c909-309e-4223-9d7e-4219963e167e/volumes"
Nov 28 17:09:17 crc kubenswrapper[4884]: I1128 17:09:17.532900 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 17:09:17 crc kubenswrapper[4884]: I1128 17:09:17.533502 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-central-agent" containerID="cri-o://b2f075106ba725c325afad128e17800191dd2cc8e2ae664c86f50f6f36652010" gracePeriod=30
Nov 28 17:09:17 crc kubenswrapper[4884]: I1128 17:09:17.533556 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="sg-core" containerID="cri-o://1e0cef010a7066c32448d1ded41921320a4daddccc6a3858798f614a47aea74b" gracePeriod=30
Nov 28 17:09:17 crc kubenswrapper[4884]: I1128 17:09:17.533581 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-notification-agent" containerID="cri-o://5f7e3787e7104ffc5911bafaf0bf06e955fdde5ea2a18b9dba242af3416c73b9" gracePeriod=30
Nov 28 17:09:17 crc kubenswrapper[4884]: I1128 17:09:17.533686 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="proxy-httpd" containerID="cri-o://cccaeb6f948ecd1135f6da8d601c705581e047487aba53889c93aaf3130a90a3" gracePeriod=30
Nov 28 17:09:17 crc kubenswrapper[4884]: E1128 17:09:17.979579 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8710527d_1641_4d8c_b5e8_5ebe92677006.slice/crio-conmon-b2f075106ba725c325afad128e17800191dd2cc8e2ae664c86f50f6f36652010.scope\": RecentStats: unable to find data in memory cache]"
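The four "Killing container with a grace period" records above show the kubelet stopping each ceilometer-0 container with gracePeriod=30: the runtime delivers SIGTERM first and escalates to SIGKILL only if the container outlives the grace period. A minimal process-level sketch of that stop sequence (illustrative only; the kubelet does this through the CRI StopContainer call with a timeout, not by signalling processes directly):

package main

import (
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace mirrors the graceful-stop pattern: SIGTERM, wait up to the
// grace period, then SIGKILL. Unix-only sketch with a plain child process
// standing in for a container.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	cmd.Process.Signal(syscall.SIGTERM)
	select {
	case <-done: // exited within the grace period
	case <-time.After(grace):
		cmd.Process.Kill() // grace period expired: SIGKILL
		<-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	cmd.Start()
	stopWithGrace(cmd, 30*time.Second)
}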
container finished" podID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerID="cccaeb6f948ecd1135f6da8d601c705581e047487aba53889c93aaf3130a90a3" exitCode=0 Nov 28 17:09:18 crc kubenswrapper[4884]: I1128 17:09:18.187754 4884 generic.go:334] "Generic (PLEG): container finished" podID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerID="1e0cef010a7066c32448d1ded41921320a4daddccc6a3858798f614a47aea74b" exitCode=2 Nov 28 17:09:18 crc kubenswrapper[4884]: I1128 17:09:18.187453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerDied","Data":"cccaeb6f948ecd1135f6da8d601c705581e047487aba53889c93aaf3130a90a3"} Nov 28 17:09:18 crc kubenswrapper[4884]: I1128 17:09:18.187816 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerDied","Data":"1e0cef010a7066c32448d1ded41921320a4daddccc6a3858798f614a47aea74b"} Nov 28 17:09:18 crc kubenswrapper[4884]: I1128 17:09:18.187833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerDied","Data":"b2f075106ba725c325afad128e17800191dd2cc8e2ae664c86f50f6f36652010"} Nov 28 17:09:18 crc kubenswrapper[4884]: I1128 17:09:18.187768 4884 generic.go:334] "Generic (PLEG): container finished" podID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerID="b2f075106ba725c325afad128e17800191dd2cc8e2ae664c86f50f6f36652010" exitCode=0 Nov 28 17:09:21 crc kubenswrapper[4884]: I1128 17:09:21.688972 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:09:21 crc kubenswrapper[4884]: E1128 17:09:21.689249 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:09:21 crc kubenswrapper[4884]: I1128 17:09:21.904669 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.1.139:3000/\": dial tcp 10.217.1.139:3000: connect: connection refused" Nov 28 17:09:23 crc kubenswrapper[4884]: I1128 17:09:23.250058 4884 generic.go:334] "Generic (PLEG): container finished" podID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerID="5f7e3787e7104ffc5911bafaf0bf06e955fdde5ea2a18b9dba242af3416c73b9" exitCode=0 Nov 28 17:09:23 crc kubenswrapper[4884]: I1128 17:09:23.250138 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerDied","Data":"5f7e3787e7104ffc5911bafaf0bf06e955fdde5ea2a18b9dba242af3416c73b9"} Nov 28 17:09:23 crc kubenswrapper[4884]: I1128 17:09:23.879064 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.040244 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-config-data\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.040438 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-run-httpd\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.040625 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsdlg\" (UniqueName: \"kubernetes.io/projected/8710527d-1641-4d8c-b5e8-5ebe92677006-kube-api-access-qsdlg\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.040730 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-combined-ca-bundle\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.041127 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.041868 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-sg-core-conf-yaml\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.041903 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-scripts\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.041932 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-log-httpd\") pod \"8710527d-1641-4d8c-b5e8-5ebe92677006\" (UID: \"8710527d-1641-4d8c-b5e8-5ebe92677006\") " Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.043129 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.045031 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.045054 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8710527d-1641-4d8c-b5e8-5ebe92677006-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.048961 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-scripts" (OuterVolumeSpecName: "scripts") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.049312 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8710527d-1641-4d8c-b5e8-5ebe92677006-kube-api-access-qsdlg" (OuterVolumeSpecName: "kube-api-access-qsdlg") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "kube-api-access-qsdlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.096137 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.148071 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsdlg\" (UniqueName: \"kubernetes.io/projected/8710527d-1641-4d8c-b5e8-5ebe92677006-kube-api-access-qsdlg\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.148115 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.148125 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.158317 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.169679 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-config-data" (OuterVolumeSpecName: "config-data") pod "8710527d-1641-4d8c-b5e8-5ebe92677006" (UID: "8710527d-1641-4d8c-b5e8-5ebe92677006"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.250521 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.250558 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8710527d-1641-4d8c-b5e8-5ebe92677006-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.268968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8710527d-1641-4d8c-b5e8-5ebe92677006","Type":"ContainerDied","Data":"b8e5dbb862d45249b27fe14ae984bbccc935c379f5da0e8f3e6b245e62fbadf1"} Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.269018 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.269054 4884 scope.go:117] "RemoveContainer" containerID="cccaeb6f948ecd1135f6da8d601c705581e047487aba53889c93aaf3130a90a3" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.311196 4884 scope.go:117] "RemoveContainer" containerID="1e0cef010a7066c32448d1ded41921320a4daddccc6a3858798f614a47aea74b" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.314213 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.329808 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.348385 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:09:24 crc kubenswrapper[4884]: E1128 17:09:24.349225 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="sg-core" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349257 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="sg-core" Nov 28 17:09:24 crc kubenswrapper[4884]: E1128 17:09:24.349299 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="proxy-httpd" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349308 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="proxy-httpd" Nov 28 17:09:24 crc kubenswrapper[4884]: E1128 17:09:24.349322 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-notification-agent" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349332 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-notification-agent" Nov 28 17:09:24 crc kubenswrapper[4884]: E1128 17:09:24.349356 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-central-agent" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349363 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-central-agent" Nov 28 17:09:24 crc kubenswrapper[4884]: E1128 17:09:24.349374 4884 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="c104c909-309e-4223-9d7e-4219963e167e" containerName="init" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349381 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c104c909-309e-4223-9d7e-4219963e167e" containerName="init" Nov 28 17:09:24 crc kubenswrapper[4884]: E1128 17:09:24.349409 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c104c909-309e-4223-9d7e-4219963e167e" containerName="dnsmasq-dns" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349417 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c104c909-309e-4223-9d7e-4219963e167e" containerName="dnsmasq-dns" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349680 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="sg-core" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349700 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="proxy-httpd" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349717 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-notification-agent" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349733 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c104c909-309e-4223-9d7e-4219963e167e" containerName="dnsmasq-dns" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.349753 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" containerName="ceilometer-central-agent" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.355233 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.357662 4884 scope.go:117] "RemoveContainer" containerID="5f7e3787e7104ffc5911bafaf0bf06e955fdde5ea2a18b9dba242af3416c73b9" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.358133 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.358420 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.360755 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.394586 4884 scope.go:117] "RemoveContainer" containerID="b2f075106ba725c325afad128e17800191dd2cc8e2ae664c86f50f6f36652010" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.456360 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-config-data\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.456451 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.456531 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bc953678-c24b-48f0-b9b7-606fd6418e97-log-httpd\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.456610 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.456670 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-scripts\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.456698 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bc953678-c24b-48f0-b9b7-606fd6418e97-run-httpd\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.457248 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9cfh\" (UniqueName: \"kubernetes.io/projected/bc953678-c24b-48f0-b9b7-606fd6418e97-kube-api-access-s9cfh\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 
17:09:24.559985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9cfh\" (UniqueName: \"kubernetes.io/projected/bc953678-c24b-48f0-b9b7-606fd6418e97-kube-api-access-s9cfh\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560153 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-config-data\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560214 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560283 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bc953678-c24b-48f0-b9b7-606fd6418e97-log-httpd\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560349 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-scripts\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560424 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bc953678-c24b-48f0-b9b7-606fd6418e97-run-httpd\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560809 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bc953678-c24b-48f0-b9b7-606fd6418e97-log-httpd\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.560842 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bc953678-c24b-48f0-b9b7-606fd6418e97-run-httpd\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.565240 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-scripts\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.565597 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-config-data\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.565769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.565866 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bc953678-c24b-48f0-b9b7-606fd6418e97-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.581064 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9cfh\" (UniqueName: \"kubernetes.io/projected/bc953678-c24b-48f0-b9b7-606fd6418e97-kube-api-access-s9cfh\") pod \"ceilometer-0\" (UID: \"bc953678-c24b-48f0-b9b7-606fd6418e97\") " pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.697704 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.704203 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8710527d-1641-4d8c-b5e8-5ebe92677006" path="/var/lib/kubelet/pods/8710527d-1641-4d8c-b5e8-5ebe92677006/volumes" Nov 28 17:09:24 crc kubenswrapper[4884]: I1128 17:09:24.760537 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 28 17:09:25 crc kubenswrapper[4884]: I1128 17:09:25.229854 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 17:09:25 crc kubenswrapper[4884]: I1128 17:09:25.288952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bc953678-c24b-48f0-b9b7-606fd6418e97","Type":"ContainerStarted","Data":"623e12cf7aa46d04e6eff5af8be6181331466c0260a0da7c2d336934fd2ccedf"} Nov 28 17:09:26 crc kubenswrapper[4884]: I1128 17:09:26.303394 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bc953678-c24b-48f0-b9b7-606fd6418e97","Type":"ContainerStarted","Data":"9ade633624e79b09e73b3fa21e8d16e715d86784f1dbd8d586fddc6d993e5d22"} Nov 28 17:09:26 crc kubenswrapper[4884]: I1128 17:09:26.491604 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 28 17:09:26 crc kubenswrapper[4884]: I1128 17:09:26.755496 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 28 17:09:27 crc kubenswrapper[4884]: I1128 17:09:27.044335 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 28 17:09:27 crc kubenswrapper[4884]: I1128 17:09:27.316653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bc953678-c24b-48f0-b9b7-606fd6418e97","Type":"ContainerStarted","Data":"7a8237d95b351b2960083c05086557d5959c3fc94a59951a8e513536ea170d2b"} Nov 28 17:09:28 crc kubenswrapper[4884]: I1128 17:09:28.334631 4884 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ceilometer-0" event={"ID":"bc953678-c24b-48f0-b9b7-606fd6418e97","Type":"ContainerStarted","Data":"19459e12fce0dd2f958d5c76d05015f0605b274cb43cddc170d3f464cde67834"} Nov 28 17:09:30 crc kubenswrapper[4884]: I1128 17:09:30.356154 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bc953678-c24b-48f0-b9b7-606fd6418e97","Type":"ContainerStarted","Data":"3aaf94b9614572a91782de1cae8b1743214d1349d740ea860223a9c320082bbd"} Nov 28 17:09:30 crc kubenswrapper[4884]: I1128 17:09:30.356949 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 17:09:30 crc kubenswrapper[4884]: I1128 17:09:30.391406 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.284189632 podStartE2EDuration="6.391384485s" podCreationTimestamp="2025-11-28 17:09:24 +0000 UTC" firstStartedPulling="2025-11-28 17:09:25.244605312 +0000 UTC m=+6604.807389113" lastFinishedPulling="2025-11-28 17:09:29.351800165 +0000 UTC m=+6608.914583966" observedRunningTime="2025-11-28 17:09:30.378267283 +0000 UTC m=+6609.941051094" watchObservedRunningTime="2025-11-28 17:09:30.391384485 +0000 UTC m=+6609.954168286" Nov 28 17:09:36 crc kubenswrapper[4884]: I1128 17:09:36.689226 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:09:36 crc kubenswrapper[4884]: E1128 17:09:36.690538 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:09:51 crc kubenswrapper[4884]: I1128 17:09:51.689412 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:09:51 crc kubenswrapper[4884]: E1128 17:09:51.690546 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:09:54 crc kubenswrapper[4884]: I1128 17:09:54.703025 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 17:10:03 crc kubenswrapper[4884]: I1128 17:10:03.688463 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:10:03 crc kubenswrapper[4884]: E1128 17:10:03.689427 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.872150 4884 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/dnsmasq-dns-5c9b59899-x7sm4"] Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.874922 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.878170 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.892301 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9b59899-x7sm4"] Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.973814 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-dns-svc\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.973924 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.974065 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqn7j\" (UniqueName: \"kubernetes.io/projected/94c03f51-8439-4e51-b177-78d76fac32d1-kube-api-access-cqn7j\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.974184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.974210 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-openstack-cell1\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:12 crc kubenswrapper[4884]: I1128 17:10:12.974286 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-config\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.076448 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.076508 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: 
\"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-openstack-cell1\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.076573 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-config\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.076729 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-dns-svc\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.076855 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.076922 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqn7j\" (UniqueName: \"kubernetes.io/projected/94c03f51-8439-4e51-b177-78d76fac32d1-kube-api-access-cqn7j\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.077710 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.077710 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-config\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.077755 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-openstack-cell1\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.078043 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.078198 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-dns-svc\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: 
\"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.100604 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqn7j\" (UniqueName: \"kubernetes.io/projected/94c03f51-8439-4e51-b177-78d76fac32d1-kube-api-access-cqn7j\") pod \"dnsmasq-dns-5c9b59899-x7sm4\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.200464 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.766035 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9b59899-x7sm4"] Nov 28 17:10:13 crc kubenswrapper[4884]: I1128 17:10:13.830692 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" event={"ID":"94c03f51-8439-4e51-b177-78d76fac32d1","Type":"ContainerStarted","Data":"d7f43120111cbc40e0f0c9bd0cb801e9bb2b847589a2bbe19ab730f95094882a"} Nov 28 17:10:14 crc kubenswrapper[4884]: I1128 17:10:14.689149 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:10:14 crc kubenswrapper[4884]: E1128 17:10:14.689630 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:10:14 crc kubenswrapper[4884]: I1128 17:10:14.841739 4884 generic.go:334] "Generic (PLEG): container finished" podID="94c03f51-8439-4e51-b177-78d76fac32d1" containerID="038ce24a5cf58091de88dbcf64efa870d34e58b1a1e347b76bfa7a3888b7aa24" exitCode=0 Nov 28 17:10:14 crc kubenswrapper[4884]: I1128 17:10:14.841791 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" event={"ID":"94c03f51-8439-4e51-b177-78d76fac32d1","Type":"ContainerDied","Data":"038ce24a5cf58091de88dbcf64efa870d34e58b1a1e347b76bfa7a3888b7aa24"} Nov 28 17:10:15 crc kubenswrapper[4884]: I1128 17:10:15.851957 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" event={"ID":"94c03f51-8439-4e51-b177-78d76fac32d1","Type":"ContainerStarted","Data":"e77ff49f6ded0ab1d859e11cf13558fb38852c2efd18a7f14864442e84b92aef"} Nov 28 17:10:15 crc kubenswrapper[4884]: I1128 17:10:15.852435 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:15 crc kubenswrapper[4884]: I1128 17:10:15.884263 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" podStartSLOduration=3.884245603 podStartE2EDuration="3.884245603s" podCreationTimestamp="2025-11-28 17:10:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:10:15.870244819 +0000 UTC m=+6655.433028620" watchObservedRunningTime="2025-11-28 17:10:15.884245603 +0000 UTC m=+6655.447029404" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.201994 4884 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.289034 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d4bd768c7-j8zqz"] Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.289408 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerName="dnsmasq-dns" containerID="cri-o://00f3c8eccd33dfd89a0a6b0162595ff988b382176230dd266b653e08a4965570" gracePeriod=10 Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.447984 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85cfb46855-c2bnw"] Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.451434 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.483226 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85cfb46855-c2bnw"] Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.618844 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-openstack-cell1\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.618973 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-dns-svc\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.619080 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-ovsdbserver-sb\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.619296 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-config\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.619369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-ovsdbserver-nb\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.619429 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmbxc\" (UniqueName: \"kubernetes.io/projected/86f307a6-ec5a-4857-8c6c-954119d2ef82-kube-api-access-jmbxc\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 
17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.723300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-ovsdbserver-nb\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.723374 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmbxc\" (UniqueName: \"kubernetes.io/projected/86f307a6-ec5a-4857-8c6c-954119d2ef82-kube-api-access-jmbxc\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.723418 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-openstack-cell1\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.723464 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-dns-svc\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.723538 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-ovsdbserver-sb\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.723570 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-config\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.724717 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-config\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.724771 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-ovsdbserver-nb\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.724980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-openstack-cell1\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.725425 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-dns-svc\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.725560 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/86f307a6-ec5a-4857-8c6c-954119d2ef82-ovsdbserver-sb\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.763040 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmbxc\" (UniqueName: \"kubernetes.io/projected/86f307a6-ec5a-4857-8c6c-954119d2ef82-kube-api-access-jmbxc\") pod \"dnsmasq-dns-85cfb46855-c2bnw\" (UID: \"86f307a6-ec5a-4857-8c6c-954119d2ef82\") " pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.806740 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.935333 4884 generic.go:334] "Generic (PLEG): container finished" podID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerID="00f3c8eccd33dfd89a0a6b0162595ff988b382176230dd266b653e08a4965570" exitCode=0 Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.935377 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" event={"ID":"9ce738ae-a3e7-43e9-807f-f9958db5285b","Type":"ContainerDied","Data":"00f3c8eccd33dfd89a0a6b0162595ff988b382176230dd266b653e08a4965570"} Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.935413 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" event={"ID":"9ce738ae-a3e7-43e9-807f-f9958db5285b","Type":"ContainerDied","Data":"13aff5975ceb060304b3112b928b6ac596e1f2a315cc53448c43757c40e0971b"} Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.935428 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13aff5975ceb060304b3112b928b6ac596e1f2a315cc53448c43757c40e0971b" Nov 28 17:10:23 crc kubenswrapper[4884]: I1128 17:10:23.956388 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.029709 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-dns-svc\") pod \"9ce738ae-a3e7-43e9-807f-f9958db5285b\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.030137 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9ssk\" (UniqueName: \"kubernetes.io/projected/9ce738ae-a3e7-43e9-807f-f9958db5285b-kube-api-access-m9ssk\") pod \"9ce738ae-a3e7-43e9-807f-f9958db5285b\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.030241 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-nb\") pod \"9ce738ae-a3e7-43e9-807f-f9958db5285b\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.030326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-config\") pod \"9ce738ae-a3e7-43e9-807f-f9958db5285b\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.030404 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb\") pod \"9ce738ae-a3e7-43e9-807f-f9958db5285b\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.052509 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ce738ae-a3e7-43e9-807f-f9958db5285b-kube-api-access-m9ssk" (OuterVolumeSpecName: "kube-api-access-m9ssk") pod "9ce738ae-a3e7-43e9-807f-f9958db5285b" (UID: "9ce738ae-a3e7-43e9-807f-f9958db5285b"). InnerVolumeSpecName "kube-api-access-m9ssk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.133228 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9ssk\" (UniqueName: \"kubernetes.io/projected/9ce738ae-a3e7-43e9-807f-f9958db5285b-kube-api-access-m9ssk\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.222509 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ce738ae-a3e7-43e9-807f-f9958db5285b" (UID: "9ce738ae-a3e7-43e9-807f-f9958db5285b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.224818 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9ce738ae-a3e7-43e9-807f-f9958db5285b" (UID: "9ce738ae-a3e7-43e9-807f-f9958db5285b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.232565 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-config" (OuterVolumeSpecName: "config") pod "9ce738ae-a3e7-43e9-807f-f9958db5285b" (UID: "9ce738ae-a3e7-43e9-807f-f9958db5285b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.234125 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ce738ae-a3e7-43e9-807f-f9958db5285b" (UID: "9ce738ae-a3e7-43e9-807f-f9958db5285b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.234352 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb\") pod \"9ce738ae-a3e7-43e9-807f-f9958db5285b\" (UID: \"9ce738ae-a3e7-43e9-807f-f9958db5285b\") " Nov 28 17:10:24 crc kubenswrapper[4884]: W1128 17:10:24.234541 4884 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/9ce738ae-a3e7-43e9-807f-f9958db5285b/volumes/kubernetes.io~configmap/ovsdbserver-sb Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.234635 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ce738ae-a3e7-43e9-807f-f9958db5285b" (UID: "9ce738ae-a3e7-43e9-807f-f9958db5285b"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.235371 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.235524 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.235608 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.235690 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ce738ae-a3e7-43e9-807f-f9958db5285b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.273953 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85cfb46855-c2bnw"] Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.948606 4884 generic.go:334] "Generic (PLEG): container finished" podID="86f307a6-ec5a-4857-8c6c-954119d2ef82" containerID="c3fec47c87aa90777191eeb90e5a59015eae4b11e886e50af9e07842ffe472e9" exitCode=0 Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.948687 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" event={"ID":"86f307a6-ec5a-4857-8c6c-954119d2ef82","Type":"ContainerDied","Data":"c3fec47c87aa90777191eeb90e5a59015eae4b11e886e50af9e07842ffe472e9"} Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.948981 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d4bd768c7-j8zqz" Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.949001 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" event={"ID":"86f307a6-ec5a-4857-8c6c-954119d2ef82","Type":"ContainerStarted","Data":"50ebc228aa71cd0cee0c529661b256d866d944a24068d9a4bd63830cfb4d6bad"} Nov 28 17:10:24 crc kubenswrapper[4884]: I1128 17:10:24.999196 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d4bd768c7-j8zqz"] Nov 28 17:10:25 crc kubenswrapper[4884]: I1128 17:10:25.008611 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d4bd768c7-j8zqz"] Nov 28 17:10:25 crc kubenswrapper[4884]: I1128 17:10:25.970454 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" event={"ID":"86f307a6-ec5a-4857-8c6c-954119d2ef82","Type":"ContainerStarted","Data":"fb090fe7c47459b28c5376f1c6fc761fccd1b8e9a03df6c8563c0d15224ed185"} Nov 28 17:10:25 crc kubenswrapper[4884]: I1128 17:10:25.970647 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:26 crc kubenswrapper[4884]: I1128 17:10:26.000773 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" podStartSLOduration=3.000749276 podStartE2EDuration="3.000749276s" podCreationTimestamp="2025-11-28 17:10:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 17:10:25.984940087 +0000 UTC m=+6665.547723898" watchObservedRunningTime="2025-11-28 17:10:26.000749276 +0000 UTC m=+6665.563533087" Nov 28 17:10:26 crc kubenswrapper[4884]: I1128 17:10:26.701889 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" path="/var/lib/kubelet/pods/9ce738ae-a3e7-43e9-807f-f9958db5285b/volumes" Nov 28 17:10:27 crc kubenswrapper[4884]: I1128 17:10:27.689446 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:10:27 crc kubenswrapper[4884]: E1128 17:10:27.690112 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.035501 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd"] Nov 28 17:10:30 crc kubenswrapper[4884]: E1128 17:10:30.036367 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerName="dnsmasq-dns" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.036384 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerName="dnsmasq-dns" Nov 28 17:10:30 crc kubenswrapper[4884]: E1128 17:10:30.036435 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerName="init" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.036444 4884 
state_mem.go:107] "Deleted CPUSet assignment" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerName="init" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.036711 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ce738ae-a3e7-43e9-807f-f9958db5285b" containerName="dnsmasq-dns" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.037684 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.039651 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.041333 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.041675 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.041850 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.050037 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd"] Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.181497 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.181932 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.182071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm4lj\" (UniqueName: \"kubernetes.io/projected/69a82a4c-156c-4447-8610-2d7f9572a42d-kube-api-access-nm4lj\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.182184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.182282 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" 
(UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.284930 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.285060 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.285159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm4lj\" (UniqueName: \"kubernetes.io/projected/69a82a4c-156c-4447-8610-2d7f9572a42d-kube-api-access-nm4lj\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.285210 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.285257 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.292821 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.293230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc 
kubenswrapper[4884]: I1128 17:10:30.294238 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.295265 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.319215 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm4lj\" (UniqueName: \"kubernetes.io/projected/69a82a4c-156c-4447-8610-2d7f9572a42d-kube-api-access-nm4lj\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.358808 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:10:30 crc kubenswrapper[4884]: I1128 17:10:30.915856 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd"] Nov 28 17:10:31 crc kubenswrapper[4884]: I1128 17:10:31.059695 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" event={"ID":"69a82a4c-156c-4447-8610-2d7f9572a42d","Type":"ContainerStarted","Data":"7c9bedad889d2e8120171ccbf5bec985e5b25414b4ac34cb03fbb42bcc0691ad"} Nov 28 17:10:33 crc kubenswrapper[4884]: I1128 17:10:33.808173 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85cfb46855-c2bnw" Nov 28 17:10:33 crc kubenswrapper[4884]: I1128 17:10:33.901151 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9b59899-x7sm4"] Nov 28 17:10:33 crc kubenswrapper[4884]: I1128 17:10:33.901436 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" containerName="dnsmasq-dns" containerID="cri-o://e77ff49f6ded0ab1d859e11cf13558fb38852c2efd18a7f14864442e84b92aef" gracePeriod=10 Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.109797 4884 generic.go:334] "Generic (PLEG): container finished" podID="94c03f51-8439-4e51-b177-78d76fac32d1" containerID="e77ff49f6ded0ab1d859e11cf13558fb38852c2efd18a7f14864442e84b92aef" exitCode=0 Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.110009 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" event={"ID":"94c03f51-8439-4e51-b177-78d76fac32d1","Type":"ContainerDied","Data":"e77ff49f6ded0ab1d859e11cf13558fb38852c2efd18a7f14864442e84b92aef"} Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.269796 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.411947 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-sb\") pod \"94c03f51-8439-4e51-b177-78d76fac32d1\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.412032 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-config\") pod \"94c03f51-8439-4e51-b177-78d76fac32d1\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.412238 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-openstack-cell1\") pod \"94c03f51-8439-4e51-b177-78d76fac32d1\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.412292 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqn7j\" (UniqueName: \"kubernetes.io/projected/94c03f51-8439-4e51-b177-78d76fac32d1-kube-api-access-cqn7j\") pod \"94c03f51-8439-4e51-b177-78d76fac32d1\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.412383 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-dns-svc\") pod \"94c03f51-8439-4e51-b177-78d76fac32d1\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.412414 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-nb\") pod \"94c03f51-8439-4e51-b177-78d76fac32d1\" (UID: \"94c03f51-8439-4e51-b177-78d76fac32d1\") " Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.417974 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94c03f51-8439-4e51-b177-78d76fac32d1-kube-api-access-cqn7j" (OuterVolumeSpecName: "kube-api-access-cqn7j") pod "94c03f51-8439-4e51-b177-78d76fac32d1" (UID: "94c03f51-8439-4e51-b177-78d76fac32d1"). InnerVolumeSpecName "kube-api-access-cqn7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.471423 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "94c03f51-8439-4e51-b177-78d76fac32d1" (UID: "94c03f51-8439-4e51-b177-78d76fac32d1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.471692 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-config" (OuterVolumeSpecName: "config") pod "94c03f51-8439-4e51-b177-78d76fac32d1" (UID: "94c03f51-8439-4e51-b177-78d76fac32d1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.475418 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "94c03f51-8439-4e51-b177-78d76fac32d1" (UID: "94c03f51-8439-4e51-b177-78d76fac32d1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.478492 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "94c03f51-8439-4e51-b177-78d76fac32d1" (UID: "94c03f51-8439-4e51-b177-78d76fac32d1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.490736 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "94c03f51-8439-4e51-b177-78d76fac32d1" (UID: "94c03f51-8439-4e51-b177-78d76fac32d1"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.515154 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.515192 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqn7j\" (UniqueName: \"kubernetes.io/projected/94c03f51-8439-4e51-b177-78d76fac32d1-kube-api-access-cqn7j\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.515210 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.515225 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.515237 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:35 crc kubenswrapper[4884]: I1128 17:10:35.515249 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94c03f51-8439-4e51-b177-78d76fac32d1-config\") on node \"crc\" DevicePath \"\"" Nov 28 17:10:36 crc kubenswrapper[4884]: I1128 17:10:36.124491 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" event={"ID":"94c03f51-8439-4e51-b177-78d76fac32d1","Type":"ContainerDied","Data":"d7f43120111cbc40e0f0c9bd0cb801e9bb2b847589a2bbe19ab730f95094882a"} Nov 28 17:10:36 crc kubenswrapper[4884]: I1128 17:10:36.124540 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9b59899-x7sm4" Nov 28 17:10:36 crc kubenswrapper[4884]: I1128 17:10:36.124552 4884 scope.go:117] "RemoveContainer" containerID="e77ff49f6ded0ab1d859e11cf13558fb38852c2efd18a7f14864442e84b92aef" Nov 28 17:10:36 crc kubenswrapper[4884]: I1128 17:10:36.163348 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9b59899-x7sm4"] Nov 28 17:10:36 crc kubenswrapper[4884]: I1128 17:10:36.174409 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9b59899-x7sm4"] Nov 28 17:10:36 crc kubenswrapper[4884]: I1128 17:10:36.705038 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" path="/var/lib/kubelet/pods/94c03f51-8439-4e51-b177-78d76fac32d1/volumes" Nov 28 17:10:38 crc kubenswrapper[4884]: I1128 17:10:38.054200 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-975xp"] Nov 28 17:10:38 crc kubenswrapper[4884]: I1128 17:10:38.062444 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-975xp"] Nov 28 17:10:38 crc kubenswrapper[4884]: I1128 17:10:38.702021 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed25f143-e5ff-44d0-8715-0af6095f5c7d" path="/var/lib/kubelet/pods/ed25f143-e5ff-44d0-8715-0af6095f5c7d/volumes" Nov 28 17:10:42 crc kubenswrapper[4884]: I1128 17:10:42.192750 4884 scope.go:117] "RemoveContainer" containerID="038ce24a5cf58091de88dbcf64efa870d34e58b1a1e347b76bfa7a3888b7aa24" Nov 28 17:10:42 crc kubenswrapper[4884]: I1128 17:10:42.689234 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:10:42 crc kubenswrapper[4884]: E1128 17:10:42.689874 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:10:43 crc kubenswrapper[4884]: I1128 17:10:43.195023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" event={"ID":"69a82a4c-156c-4447-8610-2d7f9572a42d","Type":"ContainerStarted","Data":"d90d4aff965a530eb347e1283690ec90e1a2b2514d75f8dfbddd82f5b47a62e8"} Nov 28 17:10:43 crc kubenswrapper[4884]: I1128 17:10:43.221521 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" podStartSLOduration=1.8854695910000001 podStartE2EDuration="13.221501849s" podCreationTimestamp="2025-11-28 17:10:30 +0000 UTC" firstStartedPulling="2025-11-28 17:10:30.917631575 +0000 UTC m=+6670.480415376" lastFinishedPulling="2025-11-28 17:10:42.253663833 +0000 UTC m=+6681.816447634" observedRunningTime="2025-11-28 17:10:43.214885437 +0000 UTC m=+6682.777669238" watchObservedRunningTime="2025-11-28 17:10:43.221501849 +0000 UTC m=+6682.784285650" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.032164 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-f459-account-create-jzhvp"] Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.044090 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/octavia-f459-account-create-jzhvp"] Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.745471 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5vq2l"] Nov 28 17:10:49 crc kubenswrapper[4884]: E1128 17:10:49.746432 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" containerName="dnsmasq-dns" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.746456 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" containerName="dnsmasq-dns" Nov 28 17:10:49 crc kubenswrapper[4884]: E1128 17:10:49.746493 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" containerName="init" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.746504 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" containerName="init" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.746809 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="94c03f51-8439-4e51-b177-78d76fac32d1" containerName="dnsmasq-dns" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.748814 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.758709 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5vq2l"] Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.850726 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-utilities\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.850780 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-catalog-content\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.850847 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppw8h\" (UniqueName: \"kubernetes.io/projected/8a224f5d-807d-46d4-a363-b701cfaa54ff-kube-api-access-ppw8h\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.953026 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-utilities\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.953089 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-catalog-content\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 
17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.953241 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppw8h\" (UniqueName: \"kubernetes.io/projected/8a224f5d-807d-46d4-a363-b701cfaa54ff-kube-api-access-ppw8h\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.954601 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-utilities\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.955329 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-catalog-content\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:49 crc kubenswrapper[4884]: I1128 17:10:49.977904 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppw8h\" (UniqueName: \"kubernetes.io/projected/8a224f5d-807d-46d4-a363-b701cfaa54ff-kube-api-access-ppw8h\") pod \"redhat-operators-5vq2l\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:50 crc kubenswrapper[4884]: I1128 17:10:50.072811 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:10:50 crc kubenswrapper[4884]: I1128 17:10:50.553999 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5vq2l"] Nov 28 17:10:50 crc kubenswrapper[4884]: W1128 17:10:50.554113 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a224f5d_807d_46d4_a363_b701cfaa54ff.slice/crio-3f2415ca48adc6e7876d472d02c5a686747caf16b233b5726a0961b9887f33ef WatchSource:0}: Error finding container 3f2415ca48adc6e7876d472d02c5a686747caf16b233b5726a0961b9887f33ef: Status 404 returned error can't find the container with id 3f2415ca48adc6e7876d472d02c5a686747caf16b233b5726a0961b9887f33ef Nov 28 17:10:50 crc kubenswrapper[4884]: I1128 17:10:50.704410 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da7f06c9-1e63-4a56-bbf3-ff053724a26c" path="/var/lib/kubelet/pods/da7f06c9-1e63-4a56-bbf3-ff053724a26c/volumes" Nov 28 17:10:51 crc kubenswrapper[4884]: I1128 17:10:51.278848 4884 generic.go:334] "Generic (PLEG): container finished" podID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerID="f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854" exitCode=0 Nov 28 17:10:51 crc kubenswrapper[4884]: I1128 17:10:51.278900 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerDied","Data":"f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854"} Nov 28 17:10:51 crc kubenswrapper[4884]: I1128 17:10:51.278946 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" 
event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerStarted","Data":"3f2415ca48adc6e7876d472d02c5a686747caf16b233b5726a0961b9887f33ef"} Nov 28 17:10:53 crc kubenswrapper[4884]: I1128 17:10:53.309516 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerStarted","Data":"fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc"} Nov 28 17:10:54 crc kubenswrapper[4884]: I1128 17:10:54.690245 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:10:54 crc kubenswrapper[4884]: E1128 17:10:54.691060 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:10:55 crc kubenswrapper[4884]: I1128 17:10:55.029217 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-npx2k"] Nov 28 17:10:55 crc kubenswrapper[4884]: I1128 17:10:55.039868 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-npx2k"] Nov 28 17:10:56 crc kubenswrapper[4884]: I1128 17:10:56.948380 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="adc27ec0-61ce-4045-98ff-894f7bf14067" containerName="ovn-northd" probeResult="failure" output="command timed out" Nov 28 17:10:56 crc kubenswrapper[4884]: I1128 17:10:56.948552 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="adc27ec0-61ce-4045-98ff-894f7bf14067" containerName="ovn-northd" probeResult="failure" output="command timed out" Nov 28 17:10:57 crc kubenswrapper[4884]: I1128 17:10:57.255544 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6eebfb8-df42-4119-8ee0-88efd240e6b3" path="/var/lib/kubelet/pods/c6eebfb8-df42-4119-8ee0-88efd240e6b3/volumes" Nov 28 17:10:58 crc kubenswrapper[4884]: I1128 17:10:58.369510 4884 generic.go:334] "Generic (PLEG): container finished" podID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerID="fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc" exitCode=0 Nov 28 17:10:58 crc kubenswrapper[4884]: I1128 17:10:58.369645 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerDied","Data":"fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc"} Nov 28 17:10:58 crc kubenswrapper[4884]: I1128 17:10:58.372579 4884 generic.go:334] "Generic (PLEG): container finished" podID="69a82a4c-156c-4447-8610-2d7f9572a42d" containerID="d90d4aff965a530eb347e1283690ec90e1a2b2514d75f8dfbddd82f5b47a62e8" exitCode=0 Nov 28 17:10:58 crc kubenswrapper[4884]: I1128 17:10:58.372620 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" event={"ID":"69a82a4c-156c-4447-8610-2d7f9572a42d","Type":"ContainerDied","Data":"d90d4aff965a530eb347e1283690ec90e1a2b2514d75f8dfbddd82f5b47a62e8"} Nov 28 17:10:59 crc kubenswrapper[4884]: I1128 17:10:59.934716 4884 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.019926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ceph\") pod \"69a82a4c-156c-4447-8610-2d7f9572a42d\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.020028 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-inventory\") pod \"69a82a4c-156c-4447-8610-2d7f9572a42d\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.020053 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ssh-key\") pod \"69a82a4c-156c-4447-8610-2d7f9572a42d\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.020281 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm4lj\" (UniqueName: \"kubernetes.io/projected/69a82a4c-156c-4447-8610-2d7f9572a42d-kube-api-access-nm4lj\") pod \"69a82a4c-156c-4447-8610-2d7f9572a42d\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.020341 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-pre-adoption-validation-combined-ca-bundle\") pod \"69a82a4c-156c-4447-8610-2d7f9572a42d\" (UID: \"69a82a4c-156c-4447-8610-2d7f9572a42d\") " Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.025947 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "69a82a4c-156c-4447-8610-2d7f9572a42d" (UID: "69a82a4c-156c-4447-8610-2d7f9572a42d"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.026020 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ceph" (OuterVolumeSpecName: "ceph") pod "69a82a4c-156c-4447-8610-2d7f9572a42d" (UID: "69a82a4c-156c-4447-8610-2d7f9572a42d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.026270 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a82a4c-156c-4447-8610-2d7f9572a42d-kube-api-access-nm4lj" (OuterVolumeSpecName: "kube-api-access-nm4lj") pod "69a82a4c-156c-4447-8610-2d7f9572a42d" (UID: "69a82a4c-156c-4447-8610-2d7f9572a42d"). InnerVolumeSpecName "kube-api-access-nm4lj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.056261 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "69a82a4c-156c-4447-8610-2d7f9572a42d" (UID: "69a82a4c-156c-4447-8610-2d7f9572a42d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.061780 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-inventory" (OuterVolumeSpecName: "inventory") pod "69a82a4c-156c-4447-8610-2d7f9572a42d" (UID: "69a82a4c-156c-4447-8610-2d7f9572a42d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.124853 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm4lj\" (UniqueName: \"kubernetes.io/projected/69a82a4c-156c-4447-8610-2d7f9572a42d-kube-api-access-nm4lj\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.124919 4884 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.124934 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.124946 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.124958 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/69a82a4c-156c-4447-8610-2d7f9572a42d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.396983 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" event={"ID":"69a82a4c-156c-4447-8610-2d7f9572a42d","Type":"ContainerDied","Data":"7c9bedad889d2e8120171ccbf5bec985e5b25414b4ac34cb03fbb42bcc0691ad"} Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.397027 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c9bedad889d2e8120171ccbf5bec985e5b25414b4ac34cb03fbb42bcc0691ad" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.397110 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd" Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.399862 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerStarted","Data":"8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294"} Nov 28 17:11:00 crc kubenswrapper[4884]: I1128 17:11:00.424921 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5vq2l" podStartSLOduration=3.365532912 podStartE2EDuration="11.424901295s" podCreationTimestamp="2025-11-28 17:10:49 +0000 UTC" firstStartedPulling="2025-11-28 17:10:51.281339445 +0000 UTC m=+6690.844123246" lastFinishedPulling="2025-11-28 17:10:59.340707828 +0000 UTC m=+6698.903491629" observedRunningTime="2025-11-28 17:11:00.41980812 +0000 UTC m=+6699.982591941" watchObservedRunningTime="2025-11-28 17:11:00.424901295 +0000 UTC m=+6699.987685096" Nov 28 17:11:06 crc kubenswrapper[4884]: I1128 17:11:06.041994 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-1384-account-create-22qvb"] Nov 28 17:11:06 crc kubenswrapper[4884]: I1128 17:11:06.053160 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-1384-account-create-22qvb"] Nov 28 17:11:06 crc kubenswrapper[4884]: I1128 17:11:06.704714 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52816af2-ca28-4db9-844a-416fb7f0f417" path="/var/lib/kubelet/pods/52816af2-ca28-4db9-844a-416fb7f0f417/volumes" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.420578 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t"] Nov 28 17:11:07 crc kubenswrapper[4884]: E1128 17:11:07.421429 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a82a4c-156c-4447-8610-2d7f9572a42d" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.421447 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a82a4c-156c-4447-8610-2d7f9572a42d" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.421648 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a82a4c-156c-4447-8610-2d7f9572a42d" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.422481 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.426114 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.426209 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.426573 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.429354 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.437641 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t"] Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.466466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.466592 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.466649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmdn5\" (UniqueName: \"kubernetes.io/projected/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-kube-api-access-dmdn5\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.466834 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.466919 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.568929 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ssh-key\") pod 
\"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.568999 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.569036 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.569104 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.569141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmdn5\" (UniqueName: \"kubernetes.io/projected/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-kube-api-access-dmdn5\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.574987 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.575732 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.582819 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.588603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: 
\"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.593972 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmdn5\" (UniqueName: \"kubernetes.io/projected/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-kube-api-access-dmdn5\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:07 crc kubenswrapper[4884]: I1128 17:11:07.740609 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:11:08 crc kubenswrapper[4884]: I1128 17:11:08.422625 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t"] Nov 28 17:11:08 crc kubenswrapper[4884]: I1128 17:11:08.427668 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:11:08 crc kubenswrapper[4884]: I1128 17:11:08.487941 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" event={"ID":"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68","Type":"ContainerStarted","Data":"a6231ed0b720c4285629cdd3d00dd33e9a4b17b9037022e9bfad83953be227ae"} Nov 28 17:11:08 crc kubenswrapper[4884]: I1128 17:11:08.689640 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:11:08 crc kubenswrapper[4884]: E1128 17:11:08.689908 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:11:10 crc kubenswrapper[4884]: I1128 17:11:10.073830 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:11:10 crc kubenswrapper[4884]: I1128 17:11:10.075203 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:11:10 crc kubenswrapper[4884]: I1128 17:11:10.122332 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:11:10 crc kubenswrapper[4884]: I1128 17:11:10.614425 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:11:10 crc kubenswrapper[4884]: I1128 17:11:10.683357 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5vq2l"] Nov 28 17:11:12 crc kubenswrapper[4884]: I1128 17:11:12.531005 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" event={"ID":"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68","Type":"ContainerStarted","Data":"8f97988ee604af7e6fa0136ed9f6b4c3f1e60d9fe9b81e06efbf10c44d691da1"} Nov 28 17:11:12 crc kubenswrapper[4884]: I1128 17:11:12.531426 4884 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-operators-5vq2l" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="registry-server" containerID="cri-o://8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294" gracePeriod=2 Nov 28 17:11:12 crc kubenswrapper[4884]: I1128 17:11:12.554060 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" podStartSLOduration=2.48193033 podStartE2EDuration="5.554035154s" podCreationTimestamp="2025-11-28 17:11:07 +0000 UTC" firstStartedPulling="2025-11-28 17:11:08.427391162 +0000 UTC m=+6707.990174963" lastFinishedPulling="2025-11-28 17:11:11.499495986 +0000 UTC m=+6711.062279787" observedRunningTime="2025-11-28 17:11:12.552999388 +0000 UTC m=+6712.115783199" watchObservedRunningTime="2025-11-28 17:11:12.554035154 +0000 UTC m=+6712.116818965" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.039591 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.218609 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppw8h\" (UniqueName: \"kubernetes.io/projected/8a224f5d-807d-46d4-a363-b701cfaa54ff-kube-api-access-ppw8h\") pod \"8a224f5d-807d-46d4-a363-b701cfaa54ff\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.218796 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-catalog-content\") pod \"8a224f5d-807d-46d4-a363-b701cfaa54ff\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.218886 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-utilities\") pod \"8a224f5d-807d-46d4-a363-b701cfaa54ff\" (UID: \"8a224f5d-807d-46d4-a363-b701cfaa54ff\") " Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.219793 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-utilities" (OuterVolumeSpecName: "utilities") pod "8a224f5d-807d-46d4-a363-b701cfaa54ff" (UID: "8a224f5d-807d-46d4-a363-b701cfaa54ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.223634 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a224f5d-807d-46d4-a363-b701cfaa54ff-kube-api-access-ppw8h" (OuterVolumeSpecName: "kube-api-access-ppw8h") pod "8a224f5d-807d-46d4-a363-b701cfaa54ff" (UID: "8a224f5d-807d-46d4-a363-b701cfaa54ff"). InnerVolumeSpecName "kube-api-access-ppw8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.321620 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a224f5d-807d-46d4-a363-b701cfaa54ff" (UID: "8a224f5d-807d-46d4-a363-b701cfaa54ff"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.322288 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppw8h\" (UniqueName: \"kubernetes.io/projected/8a224f5d-807d-46d4-a363-b701cfaa54ff-kube-api-access-ppw8h\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.322482 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.322518 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a224f5d-807d-46d4-a363-b701cfaa54ff-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.542981 4884 generic.go:334] "Generic (PLEG): container finished" podID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerID="8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294" exitCode=0 Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.543064 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vq2l" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.543059 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerDied","Data":"8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294"} Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.543146 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vq2l" event={"ID":"8a224f5d-807d-46d4-a363-b701cfaa54ff","Type":"ContainerDied","Data":"3f2415ca48adc6e7876d472d02c5a686747caf16b233b5726a0961b9887f33ef"} Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.543174 4884 scope.go:117] "RemoveContainer" containerID="8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.584498 4884 scope.go:117] "RemoveContainer" containerID="fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.596325 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5vq2l"] Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.603875 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5vq2l"] Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.622579 4884 scope.go:117] "RemoveContainer" containerID="f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.671917 4884 scope.go:117] "RemoveContainer" containerID="8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294" Nov 28 17:11:13 crc kubenswrapper[4884]: E1128 17:11:13.678305 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294\": container with ID starting with 8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294 not found: ID does not exist" containerID="8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.678350 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294"} err="failed to get container status \"8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294\": rpc error: code = NotFound desc = could not find container \"8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294\": container with ID starting with 8c8b6f5c0c6d2e7c26cb21793eccc289f96713e9efdaad54681f364e39bba294 not found: ID does not exist" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.678379 4884 scope.go:117] "RemoveContainer" containerID="fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc" Nov 28 17:11:13 crc kubenswrapper[4884]: E1128 17:11:13.678936 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc\": container with ID starting with fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc not found: ID does not exist" containerID="fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.678987 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc"} err="failed to get container status \"fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc\": rpc error: code = NotFound desc = could not find container \"fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc\": container with ID starting with fdf2000fbaee743d29d41d13389b7d5942d43c8cc3b9c2695d8fa9cdae20cffc not found: ID does not exist" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.679020 4884 scope.go:117] "RemoveContainer" containerID="f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854" Nov 28 17:11:13 crc kubenswrapper[4884]: E1128 17:11:13.679397 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854\": container with ID starting with f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854 not found: ID does not exist" containerID="f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854" Nov 28 17:11:13 crc kubenswrapper[4884]: I1128 17:11:13.679442 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854"} err="failed to get container status \"f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854\": rpc error: code = NotFound desc = could not find container \"f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854\": container with ID starting with f6ad789f44ae382b5f1b0480d64ca77e51e78d11b104c74ce4d0d00733014854 not found: ID does not exist" Nov 28 17:11:14 crc kubenswrapper[4884]: I1128 17:11:14.699150 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" path="/var/lib/kubelet/pods/8a224f5d-807d-46d4-a363-b701cfaa54ff/volumes" Nov 28 17:11:20 crc kubenswrapper[4884]: I1128 17:11:20.703548 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:11:20 crc kubenswrapper[4884]: E1128 17:11:20.704349 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:11:34 crc kubenswrapper[4884]: I1128 17:11:34.688822 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:11:34 crc kubenswrapper[4884]: E1128 17:11:34.689629 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:11:35 crc kubenswrapper[4884]: I1128 17:11:35.660983 4884 scope.go:117] "RemoveContainer" containerID="ed8f3bc9fa5b195bd801d5e16c15498073c6f97e53946a74b707d0f6d12b49ef" Nov 28 17:11:35 crc kubenswrapper[4884]: I1128 17:11:35.709359 4884 scope.go:117] "RemoveContainer" containerID="ee54ec0f35fdf91f4035f04356d67ae210c1c2eec9dd31e000ad820c9fdd637e" Nov 28 17:11:35 crc kubenswrapper[4884]: I1128 17:11:35.772689 4884 scope.go:117] "RemoveContainer" containerID="c5d0225a6c5c13b6626910e0e3d7cc5972c90921ccc764ad44a54f669a93537a" Nov 28 17:11:35 crc kubenswrapper[4884]: I1128 17:11:35.823629 4884 scope.go:117] "RemoveContainer" containerID="d9d32694c2d6d5cbe7a5b5e859922edb55ca1b7a16c71d0f42d85cd5b0ae266f" Nov 28 17:11:45 crc kubenswrapper[4884]: I1128 17:11:45.695454 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:11:45 crc kubenswrapper[4884]: E1128 17:11:45.696478 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:11:53 crc kubenswrapper[4884]: I1128 17:11:53.051017 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-9wv6z"] Nov 28 17:11:53 crc kubenswrapper[4884]: I1128 17:11:53.068000 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-9wv6z"] Nov 28 17:11:54 crc kubenswrapper[4884]: I1128 17:11:54.706363 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="443a471a-5e1a-45a6-9af3-ee911ce78535" path="/var/lib/kubelet/pods/443a471a-5e1a-45a6-9af3-ee911ce78535/volumes" Nov 28 17:12:00 crc kubenswrapper[4884]: I1128 17:12:00.706016 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:12:00 crc kubenswrapper[4884]: E1128 17:12:00.707041 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:12:12 crc kubenswrapper[4884]: I1128 17:12:12.688502 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:12:12 crc kubenswrapper[4884]: E1128 17:12:12.689248 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.340117 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ptz67"] Nov 28 17:12:15 crc kubenswrapper[4884]: E1128 17:12:15.341138 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="extract-content" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.341155 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="extract-content" Nov 28 17:12:15 crc kubenswrapper[4884]: E1128 17:12:15.341202 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="extract-utilities" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.341214 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="extract-utilities" Nov 28 17:12:15 crc kubenswrapper[4884]: E1128 17:12:15.341246 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="registry-server" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.341278 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="registry-server" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.341687 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a224f5d-807d-46d4-a363-b701cfaa54ff" containerName="registry-server" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.345133 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.361174 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ptz67"] Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.507460 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qlq7\" (UniqueName: \"kubernetes.io/projected/0dc51753-fa6a-42c8-94f1-78a97d17254f-kube-api-access-7qlq7\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.507603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-catalog-content\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.507826 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-utilities\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.610023 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-catalog-content\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.610131 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-utilities\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.610249 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qlq7\" (UniqueName: \"kubernetes.io/projected/0dc51753-fa6a-42c8-94f1-78a97d17254f-kube-api-access-7qlq7\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.610592 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-catalog-content\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.610686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-utilities\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.635018 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7qlq7\" (UniqueName: \"kubernetes.io/projected/0dc51753-fa6a-42c8-94f1-78a97d17254f-kube-api-access-7qlq7\") pod \"community-operators-ptz67\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:15 crc kubenswrapper[4884]: I1128 17:12:15.676366 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:16 crc kubenswrapper[4884]: I1128 17:12:16.228103 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ptz67"] Nov 28 17:12:16 crc kubenswrapper[4884]: I1128 17:12:16.251594 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ptz67" event={"ID":"0dc51753-fa6a-42c8-94f1-78a97d17254f","Type":"ContainerStarted","Data":"b814ad3eeb903b879d56622f7a1292c2b64acd1b9229521c6ea0db1522971acb"} Nov 28 17:12:17 crc kubenswrapper[4884]: I1128 17:12:17.265137 4884 generic.go:334] "Generic (PLEG): container finished" podID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerID="f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b" exitCode=0 Nov 28 17:12:17 crc kubenswrapper[4884]: I1128 17:12:17.265269 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ptz67" event={"ID":"0dc51753-fa6a-42c8-94f1-78a97d17254f","Type":"ContainerDied","Data":"f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b"} Nov 28 17:12:18 crc kubenswrapper[4884]: I1128 17:12:18.914944 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2vm77"] Nov 28 17:12:18 crc kubenswrapper[4884]: I1128 17:12:18.920705 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:18 crc kubenswrapper[4884]: I1128 17:12:18.934885 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vm77"] Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.114319 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5936360b-de60-4fce-9974-8c2fbd0a113a-catalog-content\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.114561 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blwv2\" (UniqueName: \"kubernetes.io/projected/5936360b-de60-4fce-9974-8c2fbd0a113a-kube-api-access-blwv2\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.114674 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5936360b-de60-4fce-9974-8c2fbd0a113a-utilities\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.217071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5936360b-de60-4fce-9974-8c2fbd0a113a-utilities\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.217278 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5936360b-de60-4fce-9974-8c2fbd0a113a-catalog-content\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.217389 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blwv2\" (UniqueName: \"kubernetes.io/projected/5936360b-de60-4fce-9974-8c2fbd0a113a-kube-api-access-blwv2\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.217941 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5936360b-de60-4fce-9974-8c2fbd0a113a-utilities\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.217998 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5936360b-de60-4fce-9974-8c2fbd0a113a-catalog-content\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.247467 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-blwv2\" (UniqueName: \"kubernetes.io/projected/5936360b-de60-4fce-9974-8c2fbd0a113a-kube-api-access-blwv2\") pod \"redhat-marketplace-2vm77\" (UID: \"5936360b-de60-4fce-9974-8c2fbd0a113a\") " pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.257815 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.285849 4884 generic.go:334] "Generic (PLEG): container finished" podID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerID="9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2" exitCode=0 Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.285952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ptz67" event={"ID":"0dc51753-fa6a-42c8-94f1-78a97d17254f","Type":"ContainerDied","Data":"9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2"} Nov 28 17:12:19 crc kubenswrapper[4884]: I1128 17:12:19.724460 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vm77"] Nov 28 17:12:20 crc kubenswrapper[4884]: I1128 17:12:20.300657 4884 generic.go:334] "Generic (PLEG): container finished" podID="5936360b-de60-4fce-9974-8c2fbd0a113a" containerID="bcbb2e9453f025d118e77c23d39d9d9acad5551bdfb6cd5e7ecc49909061bef4" exitCode=0 Nov 28 17:12:20 crc kubenswrapper[4884]: I1128 17:12:20.300807 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vm77" event={"ID":"5936360b-de60-4fce-9974-8c2fbd0a113a","Type":"ContainerDied","Data":"bcbb2e9453f025d118e77c23d39d9d9acad5551bdfb6cd5e7ecc49909061bef4"} Nov 28 17:12:20 crc kubenswrapper[4884]: I1128 17:12:20.301074 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vm77" event={"ID":"5936360b-de60-4fce-9974-8c2fbd0a113a","Type":"ContainerStarted","Data":"2d9365b41b9ada27f413972dda1d879b49d27e7d14592645f829e25191091a22"} Nov 28 17:12:20 crc kubenswrapper[4884]: I1128 17:12:20.304815 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ptz67" event={"ID":"0dc51753-fa6a-42c8-94f1-78a97d17254f","Type":"ContainerStarted","Data":"72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0"} Nov 28 17:12:20 crc kubenswrapper[4884]: I1128 17:12:20.354073 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ptz67" podStartSLOduration=2.880456526 podStartE2EDuration="5.354056379s" podCreationTimestamp="2025-11-28 17:12:15 +0000 UTC" firstStartedPulling="2025-11-28 17:12:17.267154071 +0000 UTC m=+6776.829937892" lastFinishedPulling="2025-11-28 17:12:19.740753914 +0000 UTC m=+6779.303537745" observedRunningTime="2025-11-28 17:12:20.348085041 +0000 UTC m=+6779.910868842" watchObservedRunningTime="2025-11-28 17:12:20.354056379 +0000 UTC m=+6779.916840180" Nov 28 17:12:24 crc kubenswrapper[4884]: I1128 17:12:24.354747 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vm77" event={"ID":"5936360b-de60-4fce-9974-8c2fbd0a113a","Type":"ContainerStarted","Data":"c2db017961f4d841bf1adb810b2f79cdfe49a51ba2f5a84db738fe79c5ef5e2e"} Nov 28 17:12:24 crc kubenswrapper[4884]: I1128 17:12:24.689781 4884 scope.go:117] "RemoveContainer" 
containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:12:24 crc kubenswrapper[4884]: E1128 17:12:24.690552 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:12:25 crc kubenswrapper[4884]: I1128 17:12:25.368432 4884 generic.go:334] "Generic (PLEG): container finished" podID="5936360b-de60-4fce-9974-8c2fbd0a113a" containerID="c2db017961f4d841bf1adb810b2f79cdfe49a51ba2f5a84db738fe79c5ef5e2e" exitCode=0 Nov 28 17:12:25 crc kubenswrapper[4884]: I1128 17:12:25.368491 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vm77" event={"ID":"5936360b-de60-4fce-9974-8c2fbd0a113a","Type":"ContainerDied","Data":"c2db017961f4d841bf1adb810b2f79cdfe49a51ba2f5a84db738fe79c5ef5e2e"} Nov 28 17:12:25 crc kubenswrapper[4884]: I1128 17:12:25.676946 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:25 crc kubenswrapper[4884]: I1128 17:12:25.677553 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:25 crc kubenswrapper[4884]: I1128 17:12:25.733585 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:26 crc kubenswrapper[4884]: I1128 17:12:26.449350 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:26 crc kubenswrapper[4884]: I1128 17:12:26.898749 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ptz67"] Nov 28 17:12:28 crc kubenswrapper[4884]: I1128 17:12:28.406610 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ptz67" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="registry-server" containerID="cri-o://72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0" gracePeriod=2 Nov 28 17:12:28 crc kubenswrapper[4884]: I1128 17:12:28.407234 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vm77" event={"ID":"5936360b-de60-4fce-9974-8c2fbd0a113a","Type":"ContainerStarted","Data":"b467f3d227c6d3bbebf6d87de2428f0a226c273694d69153a3f8f63d8520229a"} Nov 28 17:12:28 crc kubenswrapper[4884]: I1128 17:12:28.433949 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2vm77" podStartSLOduration=3.006320222 podStartE2EDuration="10.433932016s" podCreationTimestamp="2025-11-28 17:12:18 +0000 UTC" firstStartedPulling="2025-11-28 17:12:20.302679955 +0000 UTC m=+6779.865463756" lastFinishedPulling="2025-11-28 17:12:27.730291729 +0000 UTC m=+6787.293075550" observedRunningTime="2025-11-28 17:12:28.424443163 +0000 UTC m=+6787.987226964" watchObservedRunningTime="2025-11-28 17:12:28.433932016 +0000 UTC m=+6787.996715817" Nov 28 17:12:28 crc kubenswrapper[4884]: I1128 17:12:28.935628 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.050959 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qlq7\" (UniqueName: \"kubernetes.io/projected/0dc51753-fa6a-42c8-94f1-78a97d17254f-kube-api-access-7qlq7\") pod \"0dc51753-fa6a-42c8-94f1-78a97d17254f\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.051091 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-utilities\") pod \"0dc51753-fa6a-42c8-94f1-78a97d17254f\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.051299 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-catalog-content\") pod \"0dc51753-fa6a-42c8-94f1-78a97d17254f\" (UID: \"0dc51753-fa6a-42c8-94f1-78a97d17254f\") " Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.052095 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-utilities" (OuterVolumeSpecName: "utilities") pod "0dc51753-fa6a-42c8-94f1-78a97d17254f" (UID: "0dc51753-fa6a-42c8-94f1-78a97d17254f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.055951 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dc51753-fa6a-42c8-94f1-78a97d17254f-kube-api-access-7qlq7" (OuterVolumeSpecName: "kube-api-access-7qlq7") pod "0dc51753-fa6a-42c8-94f1-78a97d17254f" (UID: "0dc51753-fa6a-42c8-94f1-78a97d17254f"). InnerVolumeSpecName "kube-api-access-7qlq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.107804 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0dc51753-fa6a-42c8-94f1-78a97d17254f" (UID: "0dc51753-fa6a-42c8-94f1-78a97d17254f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.153845 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.153876 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qlq7\" (UniqueName: \"kubernetes.io/projected/0dc51753-fa6a-42c8-94f1-78a97d17254f-kube-api-access-7qlq7\") on node \"crc\" DevicePath \"\"" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.153889 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0dc51753-fa6a-42c8-94f1-78a97d17254f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.258810 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.259297 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.319231 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.419056 4884 generic.go:334] "Generic (PLEG): container finished" podID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerID="72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0" exitCode=0 Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.419146 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ptz67" event={"ID":"0dc51753-fa6a-42c8-94f1-78a97d17254f","Type":"ContainerDied","Data":"72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0"} Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.420558 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ptz67" event={"ID":"0dc51753-fa6a-42c8-94f1-78a97d17254f","Type":"ContainerDied","Data":"b814ad3eeb903b879d56622f7a1292c2b64acd1b9229521c6ea0db1522971acb"} Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.419212 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ptz67" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.420636 4884 scope.go:117] "RemoveContainer" containerID="72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.458809 4884 scope.go:117] "RemoveContainer" containerID="9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.482266 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ptz67"] Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.489477 4884 scope.go:117] "RemoveContainer" containerID="f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.494568 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ptz67"] Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.543229 4884 scope.go:117] "RemoveContainer" containerID="72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0" Nov 28 17:12:29 crc kubenswrapper[4884]: E1128 17:12:29.543948 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0\": container with ID starting with 72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0 not found: ID does not exist" containerID="72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.544024 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0"} err="failed to get container status \"72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0\": rpc error: code = NotFound desc = could not find container \"72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0\": container with ID starting with 72a27f9bd537961aabe5e84e2be80cba3d7e6fe86b909c91798ef6176f58e4f0 not found: ID does not exist" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.544077 4884 scope.go:117] "RemoveContainer" containerID="9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2" Nov 28 17:12:29 crc kubenswrapper[4884]: E1128 17:12:29.544581 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2\": container with ID starting with 9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2 not found: ID does not exist" containerID="9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.544605 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2"} err="failed to get container status \"9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2\": rpc error: code = NotFound desc = could not find container \"9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2\": container with ID starting with 9120a62d78f44d17f7b699861b6439b489b00b357ad4a291b14ada9629c075a2 not found: ID does not exist" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.544620 4884 scope.go:117] "RemoveContainer" 
containerID="f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b" Nov 28 17:12:29 crc kubenswrapper[4884]: E1128 17:12:29.544887 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b\": container with ID starting with f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b not found: ID does not exist" containerID="f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b" Nov 28 17:12:29 crc kubenswrapper[4884]: I1128 17:12:29.544906 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b"} err="failed to get container status \"f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b\": rpc error: code = NotFound desc = could not find container \"f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b\": container with ID starting with f643e4f5c98fc8085f033e8ae00e56463fcda488ad4fc0c2625991d2aacaa34b not found: ID does not exist" Nov 28 17:12:30 crc kubenswrapper[4884]: I1128 17:12:30.699571 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" path="/var/lib/kubelet/pods/0dc51753-fa6a-42c8-94f1-78a97d17254f/volumes" Nov 28 17:12:36 crc kubenswrapper[4884]: I1128 17:12:36.006391 4884 scope.go:117] "RemoveContainer" containerID="86437da1d838bd138e80d2662433b4a857f7bebc565f2004360e75445f3ebf77" Nov 28 17:12:36 crc kubenswrapper[4884]: I1128 17:12:36.035386 4884 scope.go:117] "RemoveContainer" containerID="8ca9308e05b47470451f6c0a75405c85aa1ebc309b5cec559c23786da88a7f06" Nov 28 17:12:38 crc kubenswrapper[4884]: I1128 17:12:38.689193 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:12:38 crc kubenswrapper[4884]: E1128 17:12:38.690016 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:12:39 crc kubenswrapper[4884]: I1128 17:12:39.317754 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2vm77" Nov 28 17:12:39 crc kubenswrapper[4884]: I1128 17:12:39.409670 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vm77"] Nov 28 17:12:39 crc kubenswrapper[4884]: I1128 17:12:39.466864 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-twsk5"] Nov 28 17:12:39 crc kubenswrapper[4884]: I1128 17:12:39.467147 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-twsk5" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="registry-server" containerID="cri-o://e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358" gracePeriod=2 Nov 28 17:12:39 crc kubenswrapper[4884]: I1128 17:12:39.988809 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.109728 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62scd\" (UniqueName: \"kubernetes.io/projected/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-kube-api-access-62scd\") pod \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.109798 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-utilities\") pod \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.109905 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-catalog-content\") pod \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\" (UID: \"c0a72ebb-65a3-4657-8af5-f42f71a4fa53\") " Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.111956 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-utilities" (OuterVolumeSpecName: "utilities") pod "c0a72ebb-65a3-4657-8af5-f42f71a4fa53" (UID: "c0a72ebb-65a3-4657-8af5-f42f71a4fa53"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.117746 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-kube-api-access-62scd" (OuterVolumeSpecName: "kube-api-access-62scd") pod "c0a72ebb-65a3-4657-8af5-f42f71a4fa53" (UID: "c0a72ebb-65a3-4657-8af5-f42f71a4fa53"). InnerVolumeSpecName "kube-api-access-62scd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.142855 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0a72ebb-65a3-4657-8af5-f42f71a4fa53" (UID: "c0a72ebb-65a3-4657-8af5-f42f71a4fa53"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.212748 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62scd\" (UniqueName: \"kubernetes.io/projected/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-kube-api-access-62scd\") on node \"crc\" DevicePath \"\"" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.212781 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.212792 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0a72ebb-65a3-4657-8af5-f42f71a4fa53-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.545488 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerID="e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358" exitCode=0 Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.545543 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-twsk5" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.545563 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-twsk5" event={"ID":"c0a72ebb-65a3-4657-8af5-f42f71a4fa53","Type":"ContainerDied","Data":"e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358"} Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.545909 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-twsk5" event={"ID":"c0a72ebb-65a3-4657-8af5-f42f71a4fa53","Type":"ContainerDied","Data":"29f950309472723710a0d2da2a02151b1c9ec21863e537e5233df9c8d16d0e60"} Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.545931 4884 scope.go:117] "RemoveContainer" containerID="e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.574289 4884 scope.go:117] "RemoveContainer" containerID="dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.579034 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-twsk5"] Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.590272 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-twsk5"] Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.620496 4884 scope.go:117] "RemoveContainer" containerID="de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.647453 4884 scope.go:117] "RemoveContainer" containerID="e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358" Nov 28 17:12:40 crc kubenswrapper[4884]: E1128 17:12:40.648009 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358\": container with ID starting with e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358 not found: ID does not exist" containerID="e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.648059 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358"} err="failed to get container status \"e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358\": rpc error: code = NotFound desc = could not find container \"e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358\": container with ID starting with e87f44bdf9f15e0fcada7c116f94782636c44dca079eac8ae4b7e7e566e99358 not found: ID does not exist" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.648086 4884 scope.go:117] "RemoveContainer" containerID="dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc" Nov 28 17:12:40 crc kubenswrapper[4884]: E1128 17:12:40.648680 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc\": container with ID starting with dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc not found: ID does not exist" containerID="dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.648715 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc"} err="failed to get container status \"dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc\": rpc error: code = NotFound desc = could not find container \"dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc\": container with ID starting with dde75327f95be554b459f78ff67abad057fdece573eee4b7c0d8e9771ff39ddc not found: ID does not exist" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.648738 4884 scope.go:117] "RemoveContainer" containerID="de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2" Nov 28 17:12:40 crc kubenswrapper[4884]: E1128 17:12:40.649041 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2\": container with ID starting with de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2 not found: ID does not exist" containerID="de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.649062 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2"} err="failed to get container status \"de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2\": rpc error: code = NotFound desc = could not find container \"de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2\": container with ID starting with de5f3218dca9ab3346092e24eaecaf97bea8ae7f58f4605ccbd034b805a7dda2 not found: ID does not exist" Nov 28 17:12:40 crc kubenswrapper[4884]: I1128 17:12:40.704940 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" path="/var/lib/kubelet/pods/c0a72ebb-65a3-4657-8af5-f42f71a4fa53/volumes" Nov 28 17:12:49 crc kubenswrapper[4884]: I1128 17:12:49.689049 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:12:49 crc kubenswrapper[4884]: E1128 17:12:49.690239 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:13:03 crc kubenswrapper[4884]: I1128 17:13:03.689783 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:13:04 crc kubenswrapper[4884]: I1128 17:13:04.797657 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"4778d6de2c15d4ec40cb2eed2fdc0463b2bf7385a34f4a337926739b80d7f18a"} Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.155463 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr"] Nov 28 17:15:00 crc kubenswrapper[4884]: E1128 17:15:00.156578 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="extract-content" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.156597 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="extract-content" Nov 28 17:15:00 crc kubenswrapper[4884]: E1128 17:15:00.156611 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="extract-utilities" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.156619 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="extract-utilities" Nov 28 17:15:00 crc kubenswrapper[4884]: E1128 17:15:00.156649 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="registry-server" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.156659 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="registry-server" Nov 28 17:15:00 crc kubenswrapper[4884]: E1128 17:15:00.156705 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="extract-content" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.156712 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="extract-content" Nov 28 17:15:00 crc kubenswrapper[4884]: E1128 17:15:00.156721 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="extract-utilities" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.156728 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="extract-utilities" Nov 28 17:15:00 crc kubenswrapper[4884]: E1128 17:15:00.156736 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="registry-server" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.156755 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="registry-server" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.157012 4884 
memory_manager.go:354] "RemoveStaleState removing state" podUID="c0a72ebb-65a3-4657-8af5-f42f71a4fa53" containerName="registry-server" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.157045 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dc51753-fa6a-42c8-94f1-78a97d17254f" containerName="registry-server" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.158030 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.160204 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.160762 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.165800 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr"] Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.303235 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-config-volume\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.303428 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-secret-volume\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.303684 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcwbn\" (UniqueName: \"kubernetes.io/projected/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-kube-api-access-dcwbn\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.405644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-config-volume\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.405990 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-secret-volume\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.406135 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcwbn\" (UniqueName: 
\"kubernetes.io/projected/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-kube-api-access-dcwbn\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.406824 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-config-volume\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.417516 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-secret-volume\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.425779 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcwbn\" (UniqueName: \"kubernetes.io/projected/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-kube-api-access-dcwbn\") pod \"collect-profiles-29405835-g69nr\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.489649 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:00 crc kubenswrapper[4884]: I1128 17:15:00.976540 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr"] Nov 28 17:15:01 crc kubenswrapper[4884]: I1128 17:15:01.957518 4884 generic.go:334] "Generic (PLEG): container finished" podID="dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" containerID="eb0a3b52512cbc0ed63b1caeb2d40764642de80f388afafb460443f5bfcc822f" exitCode=0 Nov 28 17:15:01 crc kubenswrapper[4884]: I1128 17:15:01.957781 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" event={"ID":"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5","Type":"ContainerDied","Data":"eb0a3b52512cbc0ed63b1caeb2d40764642de80f388afafb460443f5bfcc822f"} Nov 28 17:15:01 crc kubenswrapper[4884]: I1128 17:15:01.958071 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" event={"ID":"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5","Type":"ContainerStarted","Data":"35c97bb2647c7a63badd0733b8f0d0a3477483d8af417cada30ea331c12b0ec4"} Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.333054 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.478488 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-config-volume\") pod \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.478622 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-secret-volume\") pod \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.478667 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcwbn\" (UniqueName: \"kubernetes.io/projected/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-kube-api-access-dcwbn\") pod \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\" (UID: \"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5\") " Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.479136 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-config-volume" (OuterVolumeSpecName: "config-volume") pod "dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" (UID: "dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.479472 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.484450 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" (UID: "dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.484745 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-kube-api-access-dcwbn" (OuterVolumeSpecName: "kube-api-access-dcwbn") pod "dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" (UID: "dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5"). InnerVolumeSpecName "kube-api-access-dcwbn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.581911 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.581966 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcwbn\" (UniqueName: \"kubernetes.io/projected/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5-kube-api-access-dcwbn\") on node \"crc\" DevicePath \"\"" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.982955 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" event={"ID":"dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5","Type":"ContainerDied","Data":"35c97bb2647c7a63badd0733b8f0d0a3477483d8af417cada30ea331c12b0ec4"} Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.983299 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35c97bb2647c7a63badd0733b8f0d0a3477483d8af417cada30ea331c12b0ec4" Nov 28 17:15:03 crc kubenswrapper[4884]: I1128 17:15:03.982996 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr" Nov 28 17:15:04 crc kubenswrapper[4884]: I1128 17:15:04.411139 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"] Nov 28 17:15:04 crc kubenswrapper[4884]: I1128 17:15:04.420618 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-vjgb4"] Nov 28 17:15:04 crc kubenswrapper[4884]: I1128 17:15:04.700534 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e3ebe02-37c4-4e44-b874-ef4cb3189717" path="/var/lib/kubelet/pods/4e3ebe02-37c4-4e44-b874-ef4cb3189717/volumes" Nov 28 17:15:12 crc kubenswrapper[4884]: I1128 17:15:12.029192 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-b6lrp"] Nov 28 17:15:12 crc kubenswrapper[4884]: I1128 17:15:12.039274 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-b6lrp"] Nov 28 17:15:12 crc kubenswrapper[4884]: I1128 17:15:12.705282 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24634208-add4-43b8-84fe-ab8e71bd1135" path="/var/lib/kubelet/pods/24634208-add4-43b8-84fe-ab8e71bd1135/volumes" Nov 28 17:15:21 crc kubenswrapper[4884]: I1128 17:15:21.243331 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:15:21 crc kubenswrapper[4884]: I1128 17:15:21.243930 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:15:22 crc kubenswrapper[4884]: I1128 17:15:22.038575 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-a703-account-create-pzpv6"] Nov 28 17:15:22 crc kubenswrapper[4884]: I1128 
17:15:22.050983 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-a703-account-create-pzpv6"] Nov 28 17:15:22 crc kubenswrapper[4884]: I1128 17:15:22.703342 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce397053-6892-4bd6-a62a-788e4b2ab31f" path="/var/lib/kubelet/pods/ce397053-6892-4bd6-a62a-788e4b2ab31f/volumes" Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.050109 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-q6pfx"] Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.060483 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-q6pfx"] Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.189720 4884 scope.go:117] "RemoveContainer" containerID="00f3c8eccd33dfd89a0a6b0162595ff988b382176230dd266b653e08a4965570" Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.220646 4884 scope.go:117] "RemoveContainer" containerID="157f219657269804548696e47f3d7a47b90df1b11d0f012d7122bcb78228f1d6" Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.261830 4884 scope.go:117] "RemoveContainer" containerID="b931350cdf99a76d38af2259c0e3e51af8df470c4598855d730b8212d1d7e834" Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.311576 4884 scope.go:117] "RemoveContainer" containerID="eb9c0ea9c72eed46045fe53d2be5ff16a894be034add7c6cb1ddf1a52794d064" Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.368966 4884 scope.go:117] "RemoveContainer" containerID="8e31b905d097fbc6fb20c7de704b88149d5d54a75cf25e27a6b53a6c41b294aa" Nov 28 17:15:36 crc kubenswrapper[4884]: I1128 17:15:36.703143 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c3c3d57-bd5f-4c24-bfa2-d5231488b40b" path="/var/lib/kubelet/pods/6c3c3d57-bd5f-4c24-bfa2-d5231488b40b/volumes" Nov 28 17:15:51 crc kubenswrapper[4884]: I1128 17:15:51.243605 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:15:51 crc kubenswrapper[4884]: I1128 17:15:51.244336 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.243054 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.243673 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.243737 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:16:21 crc 
kubenswrapper[4884]: I1128 17:16:21.244796 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4778d6de2c15d4ec40cb2eed2fdc0463b2bf7385a34f4a337926739b80d7f18a"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.244868 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://4778d6de2c15d4ec40cb2eed2fdc0463b2bf7385a34f4a337926739b80d7f18a" gracePeriod=600 Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.799377 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="4778d6de2c15d4ec40cb2eed2fdc0463b2bf7385a34f4a337926739b80d7f18a" exitCode=0 Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.799446 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"4778d6de2c15d4ec40cb2eed2fdc0463b2bf7385a34f4a337926739b80d7f18a"} Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.800185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df"} Nov 28 17:16:21 crc kubenswrapper[4884]: I1128 17:16:21.800218 4884 scope.go:117] "RemoveContainer" containerID="f7839e4594f68eea1b74d53b654e6c5246b7b3aff2f38bd1f2c49bafc23641a2" Nov 28 17:16:36 crc kubenswrapper[4884]: I1128 17:16:36.532609 4884 scope.go:117] "RemoveContainer" containerID="6e2134556d2cf0adb11acb96df0d5ebe501d38000ff85cdead201b19ab34f321" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.218048 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-64rkj"] Nov 28 17:17:06 crc kubenswrapper[4884]: E1128 17:17:06.219182 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" containerName="collect-profiles" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.219201 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" containerName="collect-profiles" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.219522 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" containerName="collect-profiles" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.221431 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.233747 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64rkj"] Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.264938 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-utilities\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.265083 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pwmt\" (UniqueName: \"kubernetes.io/projected/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-kube-api-access-6pwmt\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.265233 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-catalog-content\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.367681 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-utilities\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.367772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pwmt\" (UniqueName: \"kubernetes.io/projected/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-kube-api-access-6pwmt\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.367849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-catalog-content\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.368323 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-utilities\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.368395 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-catalog-content\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.389935 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6pwmt\" (UniqueName: \"kubernetes.io/projected/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-kube-api-access-6pwmt\") pod \"certified-operators-64rkj\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:06 crc kubenswrapper[4884]: I1128 17:17:06.566016 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:07 crc kubenswrapper[4884]: I1128 17:17:07.126524 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64rkj"] Nov 28 17:17:07 crc kubenswrapper[4884]: I1128 17:17:07.276623 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerStarted","Data":"5635c5e132ab730be1801f74430cae0e867ed28413ca5e45c333dc2834f7e07f"} Nov 28 17:17:08 crc kubenswrapper[4884]: I1128 17:17:08.292780 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerID="1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e" exitCode=0 Nov 28 17:17:08 crc kubenswrapper[4884]: I1128 17:17:08.292905 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerDied","Data":"1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e"} Nov 28 17:17:08 crc kubenswrapper[4884]: I1128 17:17:08.295633 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:17:09 crc kubenswrapper[4884]: I1128 17:17:09.305754 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerStarted","Data":"cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5"} Nov 28 17:17:11 crc kubenswrapper[4884]: I1128 17:17:11.337447 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerID="cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5" exitCode=0 Nov 28 17:17:11 crc kubenswrapper[4884]: I1128 17:17:11.337494 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerDied","Data":"cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5"} Nov 28 17:17:12 crc kubenswrapper[4884]: I1128 17:17:12.350218 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerStarted","Data":"a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80"} Nov 28 17:17:12 crc kubenswrapper[4884]: I1128 17:17:12.377149 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-64rkj" podStartSLOduration=2.790006962 podStartE2EDuration="6.377133523s" podCreationTimestamp="2025-11-28 17:17:06 +0000 UTC" firstStartedPulling="2025-11-28 17:17:08.295439677 +0000 UTC m=+7067.858223478" lastFinishedPulling="2025-11-28 17:17:11.882566228 +0000 UTC m=+7071.445350039" observedRunningTime="2025-11-28 17:17:12.371943305 +0000 UTC m=+7071.934727106" watchObservedRunningTime="2025-11-28 
17:17:12.377133523 +0000 UTC m=+7071.939917324" Nov 28 17:17:16 crc kubenswrapper[4884]: I1128 17:17:16.566398 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:16 crc kubenswrapper[4884]: I1128 17:17:16.567036 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:16 crc kubenswrapper[4884]: I1128 17:17:16.620784 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:17 crc kubenswrapper[4884]: I1128 17:17:17.450959 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:17 crc kubenswrapper[4884]: I1128 17:17:17.504570 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-64rkj"] Nov 28 17:17:19 crc kubenswrapper[4884]: I1128 17:17:19.428449 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-64rkj" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="registry-server" containerID="cri-o://a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80" gracePeriod=2 Nov 28 17:17:19 crc kubenswrapper[4884]: I1128 17:17:19.959241 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.070754 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-utilities\") pod \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.070890 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pwmt\" (UniqueName: \"kubernetes.io/projected/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-kube-api-access-6pwmt\") pod \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.070941 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-catalog-content\") pod \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\" (UID: \"4c9d7e3c-0095-4e98-93d4-89508b5c2d85\") " Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.071910 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-utilities" (OuterVolumeSpecName: "utilities") pod "4c9d7e3c-0095-4e98-93d4-89508b5c2d85" (UID: "4c9d7e3c-0095-4e98-93d4-89508b5c2d85"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.077033 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-kube-api-access-6pwmt" (OuterVolumeSpecName: "kube-api-access-6pwmt") pod "4c9d7e3c-0095-4e98-93d4-89508b5c2d85" (UID: "4c9d7e3c-0095-4e98-93d4-89508b5c2d85"). InnerVolumeSpecName "kube-api-access-6pwmt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.132864 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c9d7e3c-0095-4e98-93d4-89508b5c2d85" (UID: "4c9d7e3c-0095-4e98-93d4-89508b5c2d85"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.173461 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.173496 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pwmt\" (UniqueName: \"kubernetes.io/projected/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-kube-api-access-6pwmt\") on node \"crc\" DevicePath \"\"" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.173509 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9d7e3c-0095-4e98-93d4-89508b5c2d85-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.442428 4884 generic.go:334] "Generic (PLEG): container finished" podID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerID="a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80" exitCode=0 Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.442481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerDied","Data":"a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80"} Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.442515 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64rkj" event={"ID":"4c9d7e3c-0095-4e98-93d4-89508b5c2d85","Type":"ContainerDied","Data":"5635c5e132ab730be1801f74430cae0e867ed28413ca5e45c333dc2834f7e07f"} Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.442520 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64rkj" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.442537 4884 scope.go:117] "RemoveContainer" containerID="a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.470461 4884 scope.go:117] "RemoveContainer" containerID="cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.504696 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-64rkj"] Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.513835 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-64rkj"] Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.517580 4884 scope.go:117] "RemoveContainer" containerID="1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.559472 4884 scope.go:117] "RemoveContainer" containerID="a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80" Nov 28 17:17:20 crc kubenswrapper[4884]: E1128 17:17:20.559887 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80\": container with ID starting with a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80 not found: ID does not exist" containerID="a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.559919 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80"} err="failed to get container status \"a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80\": rpc error: code = NotFound desc = could not find container \"a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80\": container with ID starting with a701ab01816191e41a5d44a1dfb24281f8bcc932a0b5ea9c978e4290c343bc80 not found: ID does not exist" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.559939 4884 scope.go:117] "RemoveContainer" containerID="cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5" Nov 28 17:17:20 crc kubenswrapper[4884]: E1128 17:17:20.560419 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5\": container with ID starting with cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5 not found: ID does not exist" containerID="cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.560469 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5"} err="failed to get container status \"cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5\": rpc error: code = NotFound desc = could not find container \"cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5\": container with ID starting with cdd7a606c2cb011a74ce3006f572d237725f3638db00aca80bb54635eb500ad5 not found: ID does not exist" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.560502 4884 scope.go:117] "RemoveContainer" 
containerID="1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e" Nov 28 17:17:20 crc kubenswrapper[4884]: E1128 17:17:20.560871 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e\": container with ID starting with 1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e not found: ID does not exist" containerID="1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.560899 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e"} err="failed to get container status \"1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e\": rpc error: code = NotFound desc = could not find container \"1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e\": container with ID starting with 1144b5e37d25b8081c1f17105dc09faf357f6702e015ae729ae9d75cf5ce486e not found: ID does not exist" Nov 28 17:17:20 crc kubenswrapper[4884]: I1128 17:17:20.701519 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" path="/var/lib/kubelet/pods/4c9d7e3c-0095-4e98-93d4-89508b5c2d85/volumes" Nov 28 17:17:48 crc kubenswrapper[4884]: I1128 17:17:48.040034 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-wgzbn"] Nov 28 17:17:48 crc kubenswrapper[4884]: I1128 17:17:48.050297 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-wgzbn"] Nov 28 17:17:48 crc kubenswrapper[4884]: I1128 17:17:48.701446 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abe3c9d7-5ae4-4cea-b6f0-739db22b8eec" path="/var/lib/kubelet/pods/abe3c9d7-5ae4-4cea-b6f0-739db22b8eec/volumes" Nov 28 17:17:57 crc kubenswrapper[4884]: I1128 17:17:57.032694 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-3e39-account-create-zzz2j"] Nov 28 17:17:57 crc kubenswrapper[4884]: I1128 17:17:57.044540 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-3e39-account-create-zzz2j"] Nov 28 17:17:58 crc kubenswrapper[4884]: I1128 17:17:58.701224 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d0f3da1-73b0-4f89-898a-4c00b2de52f5" path="/var/lib/kubelet/pods/9d0f3da1-73b0-4f89-898a-4c00b2de52f5/volumes" Nov 28 17:18:10 crc kubenswrapper[4884]: I1128 17:18:10.046611 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-2b7rf"] Nov 28 17:18:10 crc kubenswrapper[4884]: I1128 17:18:10.056509 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-2b7rf"] Nov 28 17:18:10 crc kubenswrapper[4884]: I1128 17:18:10.699421 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6ea284a-9a80-4e74-af52-1cca813bb0da" path="/var/lib/kubelet/pods/e6ea284a-9a80-4e74-af52-1cca813bb0da/volumes" Nov 28 17:18:21 crc kubenswrapper[4884]: I1128 17:18:21.242559 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:18:21 crc kubenswrapper[4884]: I1128 17:18:21.243103 4884 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:18:33 crc kubenswrapper[4884]: I1128 17:18:33.041412 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-vskr8"] Nov 28 17:18:33 crc kubenswrapper[4884]: I1128 17:18:33.053970 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-vskr8"] Nov 28 17:18:34 crc kubenswrapper[4884]: I1128 17:18:34.720744 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39b37ab6-a0b0-40f6-9e11-72c135a17ed9" path="/var/lib/kubelet/pods/39b37ab6-a0b0-40f6-9e11-72c135a17ed9/volumes" Nov 28 17:18:36 crc kubenswrapper[4884]: I1128 17:18:36.644219 4884 scope.go:117] "RemoveContainer" containerID="d8c50f43705b7789f5a611eb7940e5f64bd5d16b6415742fc956343c7782b38b" Nov 28 17:18:36 crc kubenswrapper[4884]: I1128 17:18:36.677257 4884 scope.go:117] "RemoveContainer" containerID="a03cfb62aa61e91ac0eef2d5295bf504a261012a75869df80510b0edb59db2c2" Nov 28 17:18:36 crc kubenswrapper[4884]: I1128 17:18:36.738477 4884 scope.go:117] "RemoveContainer" containerID="e0bd2370da465b7d17abca4d491f69548acb5e329c2d4e7a86cf6c878c6a3eb5" Nov 28 17:18:36 crc kubenswrapper[4884]: I1128 17:18:36.786513 4884 scope.go:117] "RemoveContainer" containerID="e76ca8e6ea27e28b60fffac18f57308a219c3d714d7108f74e12d28e7f04fa9d" Nov 28 17:18:51 crc kubenswrapper[4884]: I1128 17:18:51.243440 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:18:51 crc kubenswrapper[4884]: I1128 17:18:51.243964 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:18:52 crc kubenswrapper[4884]: I1128 17:18:52.035530 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-2c0c-account-create-lxc5k"] Nov 28 17:18:52 crc kubenswrapper[4884]: I1128 17:18:52.046905 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-2c0c-account-create-lxc5k"] Nov 28 17:18:52 crc kubenswrapper[4884]: I1128 17:18:52.699325 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="241df297-4724-41e5-b16d-8399fa93d3ee" path="/var/lib/kubelet/pods/241df297-4724-41e5-b16d-8399fa93d3ee/volumes" Nov 28 17:19:03 crc kubenswrapper[4884]: I1128 17:19:03.042323 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-q4rsb"] Nov 28 17:19:03 crc kubenswrapper[4884]: I1128 17:19:03.053762 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-q4rsb"] Nov 28 17:19:04 crc kubenswrapper[4884]: I1128 17:19:04.700483 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47b54110-28ef-40c8-ab06-9be4e188f54f" path="/var/lib/kubelet/pods/47b54110-28ef-40c8-ab06-9be4e188f54f/volumes" Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.243461 4884 
patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.244227 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.244280 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.245075 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.245144 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" gracePeriod=600 Nov 28 17:19:21 crc kubenswrapper[4884]: E1128 17:19:21.370365 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.761627 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" exitCode=0 Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.761840 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df"} Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.761991 4884 scope.go:117] "RemoveContainer" containerID="4778d6de2c15d4ec40cb2eed2fdc0463b2bf7385a34f4a337926739b80d7f18a" Nov 28 17:19:21 crc kubenswrapper[4884]: I1128 17:19:21.762767 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:19:21 crc kubenswrapper[4884]: E1128 17:19:21.764403 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:19:35 crc kubenswrapper[4884]: I1128 17:19:35.689144 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:19:35 crc kubenswrapper[4884]: E1128 17:19:35.689923 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:19:36 crc kubenswrapper[4884]: I1128 17:19:36.923653 4884 scope.go:117] "RemoveContainer" containerID="6c0d8b7ceda2febdb3de715a6b072849dbddb5e9bd2bfe128341ba580f63c325" Nov 28 17:19:36 crc kubenswrapper[4884]: I1128 17:19:36.959612 4884 scope.go:117] "RemoveContainer" containerID="bbe8319175ab437e27d9d549737b901d079bd54ae2d038514e5f968717370d07" Nov 28 17:19:49 crc kubenswrapper[4884]: I1128 17:19:49.687941 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:19:49 crc kubenswrapper[4884]: E1128 17:19:49.688747 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:20:04 crc kubenswrapper[4884]: I1128 17:20:04.689676 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:20:04 crc kubenswrapper[4884]: E1128 17:20:04.690442 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:20:18 crc kubenswrapper[4884]: I1128 17:20:18.688412 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:20:18 crc kubenswrapper[4884]: E1128 17:20:18.689216 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:20:31 crc kubenswrapper[4884]: I1128 17:20:31.689790 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:20:31 crc kubenswrapper[4884]: E1128 17:20:31.691324 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:20:45 crc kubenswrapper[4884]: I1128 17:20:45.689483 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:20:45 crc kubenswrapper[4884]: E1128 17:20:45.690263 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:20:59 crc kubenswrapper[4884]: I1128 17:20:59.688796 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:20:59 crc kubenswrapper[4884]: E1128 17:20:59.690041 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:21:14 crc kubenswrapper[4884]: I1128 17:21:14.688745 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:21:14 crc kubenswrapper[4884]: E1128 17:21:14.689744 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:21:29 crc kubenswrapper[4884]: I1128 17:21:29.689255 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:21:29 crc kubenswrapper[4884]: E1128 17:21:29.690306 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:21:40 crc kubenswrapper[4884]: I1128 17:21:40.698920 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:21:40 crc kubenswrapper[4884]: E1128 17:21:40.754582 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:21:53 crc kubenswrapper[4884]: I1128 17:21:53.688884 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:21:53 crc kubenswrapper[4884]: E1128 17:21:53.691296 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:21:55 crc kubenswrapper[4884]: I1128 17:21:55.307485 4884 generic.go:334] "Generic (PLEG): container finished" podID="4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" containerID="8f97988ee604af7e6fa0136ed9f6b4c3f1e60d9fe9b81e06efbf10c44d691da1" exitCode=0 Nov 28 17:21:55 crc kubenswrapper[4884]: I1128 17:21:55.307560 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" event={"ID":"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68","Type":"ContainerDied","Data":"8f97988ee604af7e6fa0136ed9f6b4c3f1e60d9fe9b81e06efbf10c44d691da1"} Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.742861 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.910471 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-tripleo-cleanup-combined-ca-bundle\") pod \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.910617 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ssh-key\") pod \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.910659 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmdn5\" (UniqueName: \"kubernetes.io/projected/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-kube-api-access-dmdn5\") pod \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.910772 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ceph\") pod \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.910829 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-inventory\") pod \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\" (UID: \"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68\") " Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.915654 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ceph" (OuterVolumeSpecName: "ceph") pod "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" (UID: "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.916287 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-kube-api-access-dmdn5" (OuterVolumeSpecName: "kube-api-access-dmdn5") pod "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" (UID: "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68"). InnerVolumeSpecName "kube-api-access-dmdn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.918949 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" (UID: "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.941234 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" (UID: "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:21:56 crc kubenswrapper[4884]: I1128 17:21:56.943291 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-inventory" (OuterVolumeSpecName: "inventory") pod "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" (UID: "4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.014445 4884 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.014855 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.014872 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmdn5\" (UniqueName: \"kubernetes.io/projected/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-kube-api-access-dmdn5\") on node \"crc\" DevicePath \"\"" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.014889 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.014901 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.327979 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" event={"ID":"4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68","Type":"ContainerDied","Data":"a6231ed0b720c4285629cdd3d00dd33e9a4b17b9037022e9bfad83953be227ae"} Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.328031 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6231ed0b720c4285629cdd3d00dd33e9a4b17b9037022e9bfad83953be227ae" Nov 28 17:21:57 crc kubenswrapper[4884]: I1128 17:21:57.328054 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.587597 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8rgdz"] Nov 28 17:21:59 crc kubenswrapper[4884]: E1128 17:21:59.588378 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.588394 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 17:21:59 crc kubenswrapper[4884]: E1128 17:21:59.588409 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="registry-server" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.588416 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="registry-server" Nov 28 17:21:59 crc kubenswrapper[4884]: E1128 17:21:59.588437 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="extract-content" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.588443 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="extract-content" Nov 28 17:21:59 crc kubenswrapper[4884]: E1128 17:21:59.588468 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="extract-utilities" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.588474 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="extract-utilities" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.588682 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c9d7e3c-0095-4e98-93d4-89508b5c2d85" containerName="registry-server" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.588707 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.591422 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.626971 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8rgdz"] Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.667030 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-utilities\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.667220 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-catalog-content\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.667299 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m85cz\" (UniqueName: \"kubernetes.io/projected/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-kube-api-access-m85cz\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.771314 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-utilities\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.771449 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-catalog-content\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.771515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m85cz\" (UniqueName: \"kubernetes.io/projected/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-kube-api-access-m85cz\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.771753 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-utilities\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.772410 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-catalog-content\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.793913 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-m85cz\" (UniqueName: \"kubernetes.io/projected/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-kube-api-access-m85cz\") pod \"redhat-operators-8rgdz\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:21:59 crc kubenswrapper[4884]: I1128 17:21:59.916563 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:00 crc kubenswrapper[4884]: I1128 17:22:00.489545 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8rgdz"] Nov 28 17:22:01 crc kubenswrapper[4884]: I1128 17:22:01.365387 4884 generic.go:334] "Generic (PLEG): container finished" podID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerID="0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d" exitCode=0 Nov 28 17:22:01 crc kubenswrapper[4884]: I1128 17:22:01.365647 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerDied","Data":"0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d"} Nov 28 17:22:01 crc kubenswrapper[4884]: I1128 17:22:01.365737 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerStarted","Data":"f180deeb12edcce6e312e02e42f2a9e61eb1584d16ce0d41581919284d6e421f"} Nov 28 17:22:04 crc kubenswrapper[4884]: I1128 17:22:04.396460 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerStarted","Data":"9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d"} Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.223210 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-t5cc9"] Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.228348 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.232874 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.235057 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.235139 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.235289 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.252223 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-t5cc9"] Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.296658 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.296844 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-inventory\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.296887 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ceph\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.296929 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.297114 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j64l7\" (UniqueName: \"kubernetes.io/projected/9300d8e8-5928-41e7-b7d8-a073b49ce0af-kube-api-access-j64l7\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.398644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-inventory\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: 
I1128 17:22:05.399002 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ceph\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.399046 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.399220 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j64l7\" (UniqueName: \"kubernetes.io/projected/9300d8e8-5928-41e7-b7d8-a073b49ce0af-kube-api-access-j64l7\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.399253 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.405569 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.407220 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.412757 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ceph\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.413194 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-inventory\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.434166 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j64l7\" (UniqueName: \"kubernetes.io/projected/9300d8e8-5928-41e7-b7d8-a073b49ce0af-kube-api-access-j64l7\") pod \"bootstrap-openstack-openstack-cell1-t5cc9\" (UID: 
\"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:05 crc kubenswrapper[4884]: I1128 17:22:05.546450 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:22:06 crc kubenswrapper[4884]: W1128 17:22:06.210640 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9300d8e8_5928_41e7_b7d8_a073b49ce0af.slice/crio-45a913a0988fc55c652a28a472390e0ebd3166c80181083f905c92c66e8cc7bc WatchSource:0}: Error finding container 45a913a0988fc55c652a28a472390e0ebd3166c80181083f905c92c66e8cc7bc: Status 404 returned error can't find the container with id 45a913a0988fc55c652a28a472390e0ebd3166c80181083f905c92c66e8cc7bc Nov 28 17:22:06 crc kubenswrapper[4884]: I1128 17:22:06.211668 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-t5cc9"] Nov 28 17:22:06 crc kubenswrapper[4884]: I1128 17:22:06.420101 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" event={"ID":"9300d8e8-5928-41e7-b7d8-a073b49ce0af","Type":"ContainerStarted","Data":"45a913a0988fc55c652a28a472390e0ebd3166c80181083f905c92c66e8cc7bc"} Nov 28 17:22:07 crc kubenswrapper[4884]: I1128 17:22:07.435617 4884 generic.go:334] "Generic (PLEG): container finished" podID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerID="9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d" exitCode=0 Nov 28 17:22:07 crc kubenswrapper[4884]: I1128 17:22:07.435705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerDied","Data":"9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d"} Nov 28 17:22:08 crc kubenswrapper[4884]: I1128 17:22:08.449685 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" event={"ID":"9300d8e8-5928-41e7-b7d8-a073b49ce0af","Type":"ContainerStarted","Data":"842600e215ac343efc706eb165eb4cfb34b852a7547e8ed652ab70e1d12ae932"} Nov 28 17:22:08 crc kubenswrapper[4884]: I1128 17:22:08.454072 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerStarted","Data":"0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e"} Nov 28 17:22:08 crc kubenswrapper[4884]: I1128 17:22:08.487116 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" podStartSLOduration=2.060521039 podStartE2EDuration="3.487080874s" podCreationTimestamp="2025-11-28 17:22:05 +0000 UTC" firstStartedPulling="2025-11-28 17:22:06.21699934 +0000 UTC m=+7365.779783141" lastFinishedPulling="2025-11-28 17:22:07.643559175 +0000 UTC m=+7367.206342976" observedRunningTime="2025-11-28 17:22:08.476233458 +0000 UTC m=+7368.039017259" watchObservedRunningTime="2025-11-28 17:22:08.487080874 +0000 UTC m=+7368.049864675" Nov 28 17:22:08 crc kubenswrapper[4884]: I1128 17:22:08.512975 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8rgdz" podStartSLOduration=2.981310159 podStartE2EDuration="9.51292578s" podCreationTimestamp="2025-11-28 17:21:59 +0000 UTC" firstStartedPulling="2025-11-28 
17:22:01.367357673 +0000 UTC m=+7360.930141474" lastFinishedPulling="2025-11-28 17:22:07.898973294 +0000 UTC m=+7367.461757095" observedRunningTime="2025-11-28 17:22:08.501867018 +0000 UTC m=+7368.064650819" watchObservedRunningTime="2025-11-28 17:22:08.51292578 +0000 UTC m=+7368.075709601" Nov 28 17:22:08 crc kubenswrapper[4884]: I1128 17:22:08.689697 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:22:08 crc kubenswrapper[4884]: E1128 17:22:08.690008 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:22:09 crc kubenswrapper[4884]: I1128 17:22:09.917464 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:09 crc kubenswrapper[4884]: I1128 17:22:09.917580 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:10 crc kubenswrapper[4884]: I1128 17:22:10.967411 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8rgdz" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="registry-server" probeResult="failure" output=< Nov 28 17:22:10 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 17:22:10 crc kubenswrapper[4884]: > Nov 28 17:22:19 crc kubenswrapper[4884]: I1128 17:22:19.688847 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:22:19 crc kubenswrapper[4884]: E1128 17:22:19.689682 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:22:19 crc kubenswrapper[4884]: I1128 17:22:19.969742 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:20 crc kubenswrapper[4884]: I1128 17:22:20.020004 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:20 crc kubenswrapper[4884]: I1128 17:22:20.205848 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8rgdz"] Nov 28 17:22:21 crc kubenswrapper[4884]: I1128 17:22:21.589795 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8rgdz" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="registry-server" containerID="cri-o://0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e" gracePeriod=2 Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.099125 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.279056 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-utilities\") pod \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.279197 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-catalog-content\") pod \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.279240 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m85cz\" (UniqueName: \"kubernetes.io/projected/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-kube-api-access-m85cz\") pod \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\" (UID: \"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5\") " Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.279887 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-utilities" (OuterVolumeSpecName: "utilities") pod "a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" (UID: "a52289c1-abb0-46db-a1ac-0fc2edb5f6f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.284838 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-kube-api-access-m85cz" (OuterVolumeSpecName: "kube-api-access-m85cz") pod "a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" (UID: "a52289c1-abb0-46db-a1ac-0fc2edb5f6f5"). InnerVolumeSpecName "kube-api-access-m85cz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.382515 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.382554 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m85cz\" (UniqueName: \"kubernetes.io/projected/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-kube-api-access-m85cz\") on node \"crc\" DevicePath \"\"" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.392768 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" (UID: "a52289c1-abb0-46db-a1ac-0fc2edb5f6f5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.484312 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.608450 4884 generic.go:334] "Generic (PLEG): container finished" podID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerID="0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e" exitCode=0 Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.608494 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerDied","Data":"0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e"} Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.608518 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rgdz" event={"ID":"a52289c1-abb0-46db-a1ac-0fc2edb5f6f5","Type":"ContainerDied","Data":"f180deeb12edcce6e312e02e42f2a9e61eb1584d16ce0d41581919284d6e421f"} Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.608533 4884 scope.go:117] "RemoveContainer" containerID="0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.608656 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rgdz" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.644363 4884 scope.go:117] "RemoveContainer" containerID="9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.644836 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8rgdz"] Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.659924 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8rgdz"] Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.667749 4884 scope.go:117] "RemoveContainer" containerID="0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.700957 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" path="/var/lib/kubelet/pods/a52289c1-abb0-46db-a1ac-0fc2edb5f6f5/volumes" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.719480 4884 scope.go:117] "RemoveContainer" containerID="0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e" Nov 28 17:22:22 crc kubenswrapper[4884]: E1128 17:22:22.720203 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e\": container with ID starting with 0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e not found: ID does not exist" containerID="0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.720267 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e"} err="failed to get container status \"0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e\": rpc error: code = NotFound desc 
= could not find container \"0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e\": container with ID starting with 0d8c19a5b906627c4c9f46ef87e0f2e048931888407b2387263423cd8357318e not found: ID does not exist" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.720318 4884 scope.go:117] "RemoveContainer" containerID="9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d" Nov 28 17:22:22 crc kubenswrapper[4884]: E1128 17:22:22.722229 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d\": container with ID starting with 9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d not found: ID does not exist" containerID="9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.722263 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d"} err="failed to get container status \"9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d\": rpc error: code = NotFound desc = could not find container \"9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d\": container with ID starting with 9d1980f5be7697d67143f089211e1ef1f01f7b032c00335fb571807244b4437d not found: ID does not exist" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.722290 4884 scope.go:117] "RemoveContainer" containerID="0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d" Nov 28 17:22:22 crc kubenswrapper[4884]: E1128 17:22:22.722776 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d\": container with ID starting with 0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d not found: ID does not exist" containerID="0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d" Nov 28 17:22:22 crc kubenswrapper[4884]: I1128 17:22:22.722825 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d"} err="failed to get container status \"0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d\": rpc error: code = NotFound desc = could not find container \"0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d\": container with ID starting with 0af87d9d4f537af71561b116fa37c4eda3e44d598d7c8b809bc9f7156bdca56d not found: ID does not exist" Nov 28 17:22:32 crc kubenswrapper[4884]: I1128 17:22:32.689146 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:22:32 crc kubenswrapper[4884]: E1128 17:22:32.689974 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:22:45 crc kubenswrapper[4884]: I1128 17:22:45.688917 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 
28 17:22:45 crc kubenswrapper[4884]: E1128 17:22:45.689748 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.204241 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m9hgr"] Nov 28 17:22:51 crc kubenswrapper[4884]: E1128 17:22:51.205373 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="registry-server" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.205386 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="registry-server" Nov 28 17:22:51 crc kubenswrapper[4884]: E1128 17:22:51.205404 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="extract-utilities" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.205410 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="extract-utilities" Nov 28 17:22:51 crc kubenswrapper[4884]: E1128 17:22:51.205429 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="extract-content" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.205437 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="extract-content" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.205634 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a52289c1-abb0-46db-a1ac-0fc2edb5f6f5" containerName="registry-server" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.207310 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.222408 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m9hgr"] Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.326521 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-catalog-content\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.326571 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-utilities\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.326710 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g9sz\" (UniqueName: \"kubernetes.io/projected/2cdb90a9-b954-4e93-b19c-85fa6f545adc-kube-api-access-6g9sz\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.429059 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-catalog-content\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.429117 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-utilities\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.429218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g9sz\" (UniqueName: \"kubernetes.io/projected/2cdb90a9-b954-4e93-b19c-85fa6f545adc-kube-api-access-6g9sz\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.429565 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-catalog-content\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.429867 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-utilities\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.452243 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6g9sz\" (UniqueName: \"kubernetes.io/projected/2cdb90a9-b954-4e93-b19c-85fa6f545adc-kube-api-access-6g9sz\") pod \"community-operators-m9hgr\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:51 crc kubenswrapper[4884]: I1128 17:22:51.534680 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:22:52 crc kubenswrapper[4884]: I1128 17:22:52.026676 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m9hgr"] Nov 28 17:22:52 crc kubenswrapper[4884]: I1128 17:22:52.902522 4884 generic.go:334] "Generic (PLEG): container finished" podID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerID="1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb" exitCode=0 Nov 28 17:22:52 crc kubenswrapper[4884]: I1128 17:22:52.902583 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerDied","Data":"1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb"} Nov 28 17:22:52 crc kubenswrapper[4884]: I1128 17:22:52.902859 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerStarted","Data":"2d76f52139109b03e5b5603fa85e901a3238d27588a2f6e013eeafc5a35ef5b9"} Nov 28 17:22:52 crc kubenswrapper[4884]: I1128 17:22:52.905117 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:22:53 crc kubenswrapper[4884]: I1128 17:22:53.915210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerStarted","Data":"594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57"} Nov 28 17:22:54 crc kubenswrapper[4884]: I1128 17:22:54.924707 4884 generic.go:334] "Generic (PLEG): container finished" podID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerID="594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57" exitCode=0 Nov 28 17:22:54 crc kubenswrapper[4884]: I1128 17:22:54.924772 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerDied","Data":"594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57"} Nov 28 17:22:55 crc kubenswrapper[4884]: I1128 17:22:55.935648 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerStarted","Data":"4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf"} Nov 28 17:22:55 crc kubenswrapper[4884]: I1128 17:22:55.958199 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m9hgr" podStartSLOduration=2.4577130240000002 podStartE2EDuration="4.958183864s" podCreationTimestamp="2025-11-28 17:22:51 +0000 UTC" firstStartedPulling="2025-11-28 17:22:52.904876592 +0000 UTC m=+7412.467660393" lastFinishedPulling="2025-11-28 17:22:55.405347422 +0000 UTC m=+7414.968131233" observedRunningTime="2025-11-28 17:22:55.95555609 +0000 UTC m=+7415.518339891" watchObservedRunningTime="2025-11-28 
17:22:55.958183864 +0000 UTC m=+7415.520967665" Nov 28 17:22:59 crc kubenswrapper[4884]: I1128 17:22:59.688486 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:22:59 crc kubenswrapper[4884]: E1128 17:22:59.689067 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:23:01 crc kubenswrapper[4884]: I1128 17:23:01.535696 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:23:01 crc kubenswrapper[4884]: I1128 17:23:01.535968 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:23:01 crc kubenswrapper[4884]: I1128 17:23:01.604547 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:23:02 crc kubenswrapper[4884]: I1128 17:23:02.058619 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:23:02 crc kubenswrapper[4884]: I1128 17:23:02.117454 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m9hgr"] Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.009377 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m9hgr" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="registry-server" containerID="cri-o://4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf" gracePeriod=2 Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.550218 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.707777 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g9sz\" (UniqueName: \"kubernetes.io/projected/2cdb90a9-b954-4e93-b19c-85fa6f545adc-kube-api-access-6g9sz\") pod \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.707843 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-utilities\") pod \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.708270 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-catalog-content\") pod \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\" (UID: \"2cdb90a9-b954-4e93-b19c-85fa6f545adc\") " Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.708791 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-utilities" (OuterVolumeSpecName: "utilities") pod "2cdb90a9-b954-4e93-b19c-85fa6f545adc" (UID: "2cdb90a9-b954-4e93-b19c-85fa6f545adc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.709372 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.716734 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cdb90a9-b954-4e93-b19c-85fa6f545adc-kube-api-access-6g9sz" (OuterVolumeSpecName: "kube-api-access-6g9sz") pod "2cdb90a9-b954-4e93-b19c-85fa6f545adc" (UID: "2cdb90a9-b954-4e93-b19c-85fa6f545adc"). InnerVolumeSpecName "kube-api-access-6g9sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.777472 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2cdb90a9-b954-4e93-b19c-85fa6f545adc" (UID: "2cdb90a9-b954-4e93-b19c-85fa6f545adc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.811725 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g9sz\" (UniqueName: \"kubernetes.io/projected/2cdb90a9-b954-4e93-b19c-85fa6f545adc-kube-api-access-6g9sz\") on node \"crc\" DevicePath \"\"" Nov 28 17:23:04 crc kubenswrapper[4884]: I1128 17:23:04.811756 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2cdb90a9-b954-4e93-b19c-85fa6f545adc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.025304 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m9hgr" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.025362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerDied","Data":"4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf"} Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.026176 4884 scope.go:117] "RemoveContainer" containerID="4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.025170 4884 generic.go:334] "Generic (PLEG): container finished" podID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerID="4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf" exitCode=0 Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.029341 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m9hgr" event={"ID":"2cdb90a9-b954-4e93-b19c-85fa6f545adc","Type":"ContainerDied","Data":"2d76f52139109b03e5b5603fa85e901a3238d27588a2f6e013eeafc5a35ef5b9"} Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.055235 4884 scope.go:117] "RemoveContainer" containerID="594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.066697 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m9hgr"] Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.075888 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m9hgr"] Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.091509 4884 scope.go:117] "RemoveContainer" containerID="1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.147072 4884 scope.go:117] "RemoveContainer" containerID="4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf" Nov 28 17:23:05 crc kubenswrapper[4884]: E1128 17:23:05.147619 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf\": container with ID starting with 4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf not found: ID does not exist" containerID="4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.147662 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf"} err="failed to get container status \"4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf\": rpc error: code = NotFound desc = could not find container \"4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf\": container with ID starting with 4553a21a92b608d686c28ef9e39183fa90896e4e5b44625d5db0dde2a27a87cf not found: ID does not exist" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.147693 4884 scope.go:117] "RemoveContainer" containerID="594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57" Nov 28 17:23:05 crc kubenswrapper[4884]: E1128 17:23:05.148261 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57\": container with ID 
starting with 594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57 not found: ID does not exist" containerID="594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.148302 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57"} err="failed to get container status \"594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57\": rpc error: code = NotFound desc = could not find container \"594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57\": container with ID starting with 594430191cecd8e8a84adcc683460b0a1ad5d5e562bdab2fd726143f17f4ab57 not found: ID does not exist" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.148321 4884 scope.go:117] "RemoveContainer" containerID="1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb" Nov 28 17:23:05 crc kubenswrapper[4884]: E1128 17:23:05.148735 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb\": container with ID starting with 1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb not found: ID does not exist" containerID="1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb" Nov 28 17:23:05 crc kubenswrapper[4884]: I1128 17:23:05.148764 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb"} err="failed to get container status \"1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb\": rpc error: code = NotFound desc = could not find container \"1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb\": container with ID starting with 1604272de43e8d710cf3be25f59b137c1764e4e58d1419a8d426d37506c02beb not found: ID does not exist" Nov 28 17:23:06 crc kubenswrapper[4884]: I1128 17:23:06.701780 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" path="/var/lib/kubelet/pods/2cdb90a9-b954-4e93-b19c-85fa6f545adc/volumes" Nov 28 17:23:12 crc kubenswrapper[4884]: I1128 17:23:12.690019 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:23:12 crc kubenswrapper[4884]: E1128 17:23:12.691268 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:23:23 crc kubenswrapper[4884]: I1128 17:23:23.689154 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:23:23 crc kubenswrapper[4884]: E1128 17:23:23.689851 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:23:38 crc kubenswrapper[4884]: I1128 17:23:38.689572 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:23:38 crc kubenswrapper[4884]: E1128 17:23:38.690814 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:23:51 crc kubenswrapper[4884]: I1128 17:23:51.688904 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:23:51 crc kubenswrapper[4884]: E1128 17:23:51.689933 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:24:05 crc kubenswrapper[4884]: I1128 17:24:05.689214 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:24:05 crc kubenswrapper[4884]: E1128 17:24:05.690046 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:24:17 crc kubenswrapper[4884]: I1128 17:24:17.688751 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:24:17 crc kubenswrapper[4884]: E1128 17:24:17.691026 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:24:32 crc kubenswrapper[4884]: I1128 17:24:32.688943 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df" Nov 28 17:24:32 crc kubenswrapper[4884]: I1128 17:24:32.921069 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"030beb3bce746039696177e34d1f5f166fcd801c3a5570e3e24b4dd09a6b5c88"} Nov 28 17:25:21 crc kubenswrapper[4884]: I1128 17:25:21.397362 4884 generic.go:334] "Generic (PLEG): container finished" podID="9300d8e8-5928-41e7-b7d8-a073b49ce0af" 
containerID="842600e215ac343efc706eb165eb4cfb34b852a7547e8ed652ab70e1d12ae932" exitCode=0 Nov 28 17:25:21 crc kubenswrapper[4884]: I1128 17:25:21.397448 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" event={"ID":"9300d8e8-5928-41e7-b7d8-a073b49ce0af","Type":"ContainerDied","Data":"842600e215ac343efc706eb165eb4cfb34b852a7547e8ed652ab70e1d12ae932"} Nov 28 17:25:22 crc kubenswrapper[4884]: I1128 17:25:22.926800 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.071727 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ceph\") pod \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.071844 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j64l7\" (UniqueName: \"kubernetes.io/projected/9300d8e8-5928-41e7-b7d8-a073b49ce0af-kube-api-access-j64l7\") pod \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.071911 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-inventory\") pod \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.072372 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ssh-key\") pod \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.072512 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-bootstrap-combined-ca-bundle\") pod \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\" (UID: \"9300d8e8-5928-41e7-b7d8-a073b49ce0af\") " Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.078046 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ceph" (OuterVolumeSpecName: "ceph") pod "9300d8e8-5928-41e7-b7d8-a073b49ce0af" (UID: "9300d8e8-5928-41e7-b7d8-a073b49ce0af"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.078188 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9300d8e8-5928-41e7-b7d8-a073b49ce0af-kube-api-access-j64l7" (OuterVolumeSpecName: "kube-api-access-j64l7") pod "9300d8e8-5928-41e7-b7d8-a073b49ce0af" (UID: "9300d8e8-5928-41e7-b7d8-a073b49ce0af"). InnerVolumeSpecName "kube-api-access-j64l7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.081144 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "9300d8e8-5928-41e7-b7d8-a073b49ce0af" (UID: "9300d8e8-5928-41e7-b7d8-a073b49ce0af"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.111563 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-inventory" (OuterVolumeSpecName: "inventory") pod "9300d8e8-5928-41e7-b7d8-a073b49ce0af" (UID: "9300d8e8-5928-41e7-b7d8-a073b49ce0af"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.128802 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9300d8e8-5928-41e7-b7d8-a073b49ce0af" (UID: "9300d8e8-5928-41e7-b7d8-a073b49ce0af"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.174923 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.175208 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j64l7\" (UniqueName: \"kubernetes.io/projected/9300d8e8-5928-41e7-b7d8-a073b49ce0af-kube-api-access-j64l7\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.175291 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.175385 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.175443 4884 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9300d8e8-5928-41e7-b7d8-a073b49ce0af-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.425963 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" event={"ID":"9300d8e8-5928-41e7-b7d8-a073b49ce0af","Type":"ContainerDied","Data":"45a913a0988fc55c652a28a472390e0ebd3166c80181083f905c92c66e8cc7bc"} Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.426009 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45a913a0988fc55c652a28a472390e0ebd3166c80181083f905c92c66e8cc7bc" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.426047 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-t5cc9" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.526754 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-8wpwr"] Nov 28 17:25:23 crc kubenswrapper[4884]: E1128 17:25:23.527274 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="extract-content" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.527298 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="extract-content" Nov 28 17:25:23 crc kubenswrapper[4884]: E1128 17:25:23.527362 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="registry-server" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.527372 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="registry-server" Nov 28 17:25:23 crc kubenswrapper[4884]: E1128 17:25:23.527388 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9300d8e8-5928-41e7-b7d8-a073b49ce0af" containerName="bootstrap-openstack-openstack-cell1" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.527396 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9300d8e8-5928-41e7-b7d8-a073b49ce0af" containerName="bootstrap-openstack-openstack-cell1" Nov 28 17:25:23 crc kubenswrapper[4884]: E1128 17:25:23.527415 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="extract-utilities" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.527423 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="extract-utilities" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.527665 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9300d8e8-5928-41e7-b7d8-a073b49ce0af" containerName="bootstrap-openstack-openstack-cell1" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.527702 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cdb90a9-b954-4e93-b19c-85fa6f545adc" containerName="registry-server" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.528646 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.531564 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.531690 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.531754 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.531888 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.538211 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-8wpwr"] Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.584849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs8x9\" (UniqueName: \"kubernetes.io/projected/5bc9bcb4-cbd4-4422-9722-16084405cf50-kube-api-access-rs8x9\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.585158 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ssh-key\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.585240 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-inventory\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.585464 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ceph\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.687810 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ssh-key\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.687954 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-inventory\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc 
kubenswrapper[4884]: I1128 17:25:23.688116 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ceph\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.688247 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs8x9\" (UniqueName: \"kubernetes.io/projected/5bc9bcb4-cbd4-4422-9722-16084405cf50-kube-api-access-rs8x9\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.693149 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-inventory\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.694182 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ssh-key\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.694510 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ceph\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.706303 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs8x9\" (UniqueName: \"kubernetes.io/projected/5bc9bcb4-cbd4-4422-9722-16084405cf50-kube-api-access-rs8x9\") pod \"download-cache-openstack-openstack-cell1-8wpwr\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") " pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:23 crc kubenswrapper[4884]: I1128 17:25:23.855221 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:25:25 crc kubenswrapper[4884]: I1128 17:25:25.068491 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-8wpwr"] Nov 28 17:25:25 crc kubenswrapper[4884]: I1128 17:25:25.941390 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" event={"ID":"5bc9bcb4-cbd4-4422-9722-16084405cf50","Type":"ContainerStarted","Data":"822b9413eb0898dfbd69f69857a4c17033b4b3f4c79a0e3e10a0f6be3fc9492e"} Nov 28 17:25:25 crc kubenswrapper[4884]: I1128 17:25:25.941893 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" event={"ID":"5bc9bcb4-cbd4-4422-9722-16084405cf50","Type":"ContainerStarted","Data":"4c8344e08c163ef953efae47b7e4bced3f59da0d5284f4bf46ffa57fbf14fef0"} Nov 28 17:25:25 crc kubenswrapper[4884]: I1128 17:25:25.957385 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" podStartSLOduration=2.504362616 podStartE2EDuration="2.957368845s" podCreationTimestamp="2025-11-28 17:25:23 +0000 UTC" firstStartedPulling="2025-11-28 17:25:25.073953794 +0000 UTC m=+7564.636737595" lastFinishedPulling="2025-11-28 17:25:25.526960023 +0000 UTC m=+7565.089743824" observedRunningTime="2025-11-28 17:25:25.955035828 +0000 UTC m=+7565.517819629" watchObservedRunningTime="2025-11-28 17:25:25.957368845 +0000 UTC m=+7565.520152646" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.178048 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8mdft"] Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.182368 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.190719 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8mdft"] Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.371385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-utilities\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.371771 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9sd9\" (UniqueName: \"kubernetes.io/projected/b30114a8-f44b-4304-9599-7ad3f6273058-kube-api-access-w9sd9\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.371937 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-catalog-content\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.474060 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-utilities\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.474179 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9sd9\" (UniqueName: \"kubernetes.io/projected/b30114a8-f44b-4304-9599-7ad3f6273058-kube-api-access-w9sd9\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.474244 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-catalog-content\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.474990 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-utilities\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.475017 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-catalog-content\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.508989 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-w9sd9\" (UniqueName: \"kubernetes.io/projected/b30114a8-f44b-4304-9599-7ad3f6273058-kube-api-access-w9sd9\") pod \"redhat-marketplace-8mdft\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:32 crc kubenswrapper[4884]: I1128 17:25:32.526526 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:33 crc kubenswrapper[4884]: I1128 17:25:33.029204 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8mdft"] Nov 28 17:25:34 crc kubenswrapper[4884]: I1128 17:25:34.023210 4884 generic.go:334] "Generic (PLEG): container finished" podID="b30114a8-f44b-4304-9599-7ad3f6273058" containerID="c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad" exitCode=0 Nov 28 17:25:34 crc kubenswrapper[4884]: I1128 17:25:34.023327 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8mdft" event={"ID":"b30114a8-f44b-4304-9599-7ad3f6273058","Type":"ContainerDied","Data":"c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad"} Nov 28 17:25:34 crc kubenswrapper[4884]: I1128 17:25:34.025419 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8mdft" event={"ID":"b30114a8-f44b-4304-9599-7ad3f6273058","Type":"ContainerStarted","Data":"ae919d48b9bf7c637df06bf1a90d344f7f1933cd536b2c96922868c0a954f291"} Nov 28 17:25:36 crc kubenswrapper[4884]: I1128 17:25:36.045971 4884 generic.go:334] "Generic (PLEG): container finished" podID="b30114a8-f44b-4304-9599-7ad3f6273058" containerID="f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401" exitCode=0 Nov 28 17:25:36 crc kubenswrapper[4884]: I1128 17:25:36.046073 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8mdft" event={"ID":"b30114a8-f44b-4304-9599-7ad3f6273058","Type":"ContainerDied","Data":"f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401"} Nov 28 17:25:37 crc kubenswrapper[4884]: I1128 17:25:37.062534 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8mdft" event={"ID":"b30114a8-f44b-4304-9599-7ad3f6273058","Type":"ContainerStarted","Data":"9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d"} Nov 28 17:25:37 crc kubenswrapper[4884]: I1128 17:25:37.101285 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8mdft" podStartSLOduration=2.6604427250000002 podStartE2EDuration="5.101261077s" podCreationTimestamp="2025-11-28 17:25:32 +0000 UTC" firstStartedPulling="2025-11-28 17:25:34.026685893 +0000 UTC m=+7573.589469694" lastFinishedPulling="2025-11-28 17:25:36.467504245 +0000 UTC m=+7576.030288046" observedRunningTime="2025-11-28 17:25:37.083800737 +0000 UTC m=+7576.646584558" watchObservedRunningTime="2025-11-28 17:25:37.101261077 +0000 UTC m=+7576.664044918" Nov 28 17:25:42 crc kubenswrapper[4884]: I1128 17:25:42.526919 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:42 crc kubenswrapper[4884]: I1128 17:25:42.527480 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:42 crc kubenswrapper[4884]: I1128 17:25:42.570663 4884 kubelet.go:2542] 
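The readiness/startup probe transitions above for redhat-marketplace-8mdft are gRPC health checks against the registry-server; later in this log kubelet is seen exec'ing grpc_health_probe -addr=:50051 for the same kind of catalog pod. An approximate sketch of what such a probe does, using the standard gRPC health service (assumes the grpcio and grpcio-health-checking packages; this is not the actual probe binary):

    import grpc
    from grpc_health.v1 import health_pb2, health_pb2_grpc

    def grpc_probe(addr="localhost:50051", timeout_s=1.0):
        # SERVING -> probe success; anything else (including a connect
        # timeout, as in the "failed to connect service :50051" output
        # further down this log) -> probe failure.
        channel = grpc.insecure_channel(addr)
        try:
            stub = health_pb2_grpc.HealthStub(channel)
            resp = stub.Check(health_pb2.HealthCheckRequest(service=""), timeout=timeout_s)
            return resp.status == health_pb2.HealthCheckResponse.SERVING
        except grpc.RpcError:
            return False
        finally:
            channel.close()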
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:43 crc kubenswrapper[4884]: I1128 17:25:43.178126 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:43 crc kubenswrapper[4884]: I1128 17:25:43.231780 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8mdft"] Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.149127 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8mdft" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="registry-server" containerID="cri-o://9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d" gracePeriod=2 Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.824519 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.970677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-catalog-content\") pod \"b30114a8-f44b-4304-9599-7ad3f6273058\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.970745 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9sd9\" (UniqueName: \"kubernetes.io/projected/b30114a8-f44b-4304-9599-7ad3f6273058-kube-api-access-w9sd9\") pod \"b30114a8-f44b-4304-9599-7ad3f6273058\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.970845 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-utilities\") pod \"b30114a8-f44b-4304-9599-7ad3f6273058\" (UID: \"b30114a8-f44b-4304-9599-7ad3f6273058\") " Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.972480 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-utilities" (OuterVolumeSpecName: "utilities") pod "b30114a8-f44b-4304-9599-7ad3f6273058" (UID: "b30114a8-f44b-4304-9599-7ad3f6273058"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.976883 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b30114a8-f44b-4304-9599-7ad3f6273058-kube-api-access-w9sd9" (OuterVolumeSpecName: "kube-api-access-w9sd9") pod "b30114a8-f44b-4304-9599-7ad3f6273058" (UID: "b30114a8-f44b-4304-9599-7ad3f6273058"). InnerVolumeSpecName "kube-api-access-w9sd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:25:45 crc kubenswrapper[4884]: I1128 17:25:45.994340 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b30114a8-f44b-4304-9599-7ad3f6273058" (UID: "b30114a8-f44b-4304-9599-7ad3f6273058"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.074133 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.074450 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9sd9\" (UniqueName: \"kubernetes.io/projected/b30114a8-f44b-4304-9599-7ad3f6273058-kube-api-access-w9sd9\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.074579 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b30114a8-f44b-4304-9599-7ad3f6273058-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.161927 4884 generic.go:334] "Generic (PLEG): container finished" podID="b30114a8-f44b-4304-9599-7ad3f6273058" containerID="9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d" exitCode=0 Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.162027 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8mdft" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.162011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8mdft" event={"ID":"b30114a8-f44b-4304-9599-7ad3f6273058","Type":"ContainerDied","Data":"9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d"} Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.162481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8mdft" event={"ID":"b30114a8-f44b-4304-9599-7ad3f6273058","Type":"ContainerDied","Data":"ae919d48b9bf7c637df06bf1a90d344f7f1933cd536b2c96922868c0a954f291"} Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.162501 4884 scope.go:117] "RemoveContainer" containerID="9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.200915 4884 scope.go:117] "RemoveContainer" containerID="f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.211195 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8mdft"] Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.223478 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8mdft"] Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.232917 4884 scope.go:117] "RemoveContainer" containerID="c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.280799 4884 scope.go:117] "RemoveContainer" containerID="9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d" Nov 28 17:25:46 crc kubenswrapper[4884]: E1128 17:25:46.281377 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d\": container with ID starting with 9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d not found: ID does not exist" containerID="9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.281460 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d"} err="failed to get container status \"9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d\": rpc error: code = NotFound desc = could not find container \"9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d\": container with ID starting with 9d811cc9df1f18b87e4d5181858d8a73fca76efb5bfbf2761a8e7ac20274ee2d not found: ID does not exist" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.281492 4884 scope.go:117] "RemoveContainer" containerID="f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401" Nov 28 17:25:46 crc kubenswrapper[4884]: E1128 17:25:46.281980 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401\": container with ID starting with f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401 not found: ID does not exist" containerID="f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.282009 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401"} err="failed to get container status \"f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401\": rpc error: code = NotFound desc = could not find container \"f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401\": container with ID starting with f4cad99a2715a2d2e3c09f51811f834bcc9801f7d074a11968a1d1f790ced401 not found: ID does not exist" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.282026 4884 scope.go:117] "RemoveContainer" containerID="c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad" Nov 28 17:25:46 crc kubenswrapper[4884]: E1128 17:25:46.282388 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad\": container with ID starting with c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad not found: ID does not exist" containerID="c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.282539 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad"} err="failed to get container status \"c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad\": rpc error: code = NotFound desc = could not find container \"c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad\": container with ID starting with c8b678ccf8b7b144c4c7d70abde8110eafe1b4750c753fd0cb58e8c1be5734ad not found: ID does not exist" Nov 28 17:25:46 crc kubenswrapper[4884]: I1128 17:25:46.709929 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" path="/var/lib/kubelet/pods/b30114a8-f44b-4304-9599-7ad3f6273058/volumes" Nov 28 17:26:51 crc kubenswrapper[4884]: I1128 17:26:51.242880 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:26:51 crc kubenswrapper[4884]: I1128 17:26:51.243468 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:27:04 crc kubenswrapper[4884]: I1128 17:27:04.074017 4884 generic.go:334] "Generic (PLEG): container finished" podID="5bc9bcb4-cbd4-4422-9722-16084405cf50" containerID="822b9413eb0898dfbd69f69857a4c17033b4b3f4c79a0e3e10a0f6be3fc9492e" exitCode=0
Nov 28 17:27:04 crc kubenswrapper[4884]: I1128 17:27:04.074083 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" event={"ID":"5bc9bcb4-cbd4-4422-9722-16084405cf50","Type":"ContainerDied","Data":"822b9413eb0898dfbd69f69857a4c17033b4b3f4c79a0e3e10a0f6be3fc9492e"}
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.751595 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr"
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.756640 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ssh-key\") pod \"5bc9bcb4-cbd4-4422-9722-16084405cf50\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") "
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.756788 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ceph\") pod \"5bc9bcb4-cbd4-4422-9722-16084405cf50\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") "
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.757048 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-inventory\") pod \"5bc9bcb4-cbd4-4422-9722-16084405cf50\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") "
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.757127 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs8x9\" (UniqueName: \"kubernetes.io/projected/5bc9bcb4-cbd4-4422-9722-16084405cf50-kube-api-access-rs8x9\") pod \"5bc9bcb4-cbd4-4422-9722-16084405cf50\" (UID: \"5bc9bcb4-cbd4-4422-9722-16084405cf50\") "
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.764034 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bc9bcb4-cbd4-4422-9722-16084405cf50-kube-api-access-rs8x9" (OuterVolumeSpecName: "kube-api-access-rs8x9") pod "5bc9bcb4-cbd4-4422-9722-16084405cf50" (UID: "5bc9bcb4-cbd4-4422-9722-16084405cf50"). InnerVolumeSpecName "kube-api-access-rs8x9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.764435 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ceph" (OuterVolumeSpecName: "ceph") pod "5bc9bcb4-cbd4-4422-9722-16084405cf50" (UID: "5bc9bcb4-cbd4-4422-9722-16084405cf50"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.827726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5bc9bcb4-cbd4-4422-9722-16084405cf50" (UID: "5bc9bcb4-cbd4-4422-9722-16084405cf50"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.834479 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-inventory" (OuterVolumeSpecName: "inventory") pod "5bc9bcb4-cbd4-4422-9722-16084405cf50" (UID: "5bc9bcb4-cbd4-4422-9722-16084405cf50"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.864984 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.865385 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rs8x9\" (UniqueName: \"kubernetes.io/projected/5bc9bcb4-cbd4-4422-9722-16084405cf50-kube-api-access-rs8x9\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.865401 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:05 crc kubenswrapper[4884]: I1128 17:27:05.865414 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/5bc9bcb4-cbd4-4422-9722-16084405cf50-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.094809 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" event={"ID":"5bc9bcb4-cbd4-4422-9722-16084405cf50","Type":"ContainerDied","Data":"4c8344e08c163ef953efae47b7e4bced3f59da0d5284f4bf46ffa57fbf14fef0"}
Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.094850 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4c8344e08c163ef953efae47b7e4bced3f59da0d5284f4bf46ffa57fbf14fef0"
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8wpwr" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199208 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-b67pg"] Nov 28 17:27:06 crc kubenswrapper[4884]: E1128 17:27:06.199615 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc9bcb4-cbd4-4422-9722-16084405cf50" containerName="download-cache-openstack-openstack-cell1" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199631 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc9bcb4-cbd4-4422-9722-16084405cf50" containerName="download-cache-openstack-openstack-cell1" Nov 28 17:27:06 crc kubenswrapper[4884]: E1128 17:27:06.199651 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="extract-utilities" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199658 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="extract-utilities" Nov 28 17:27:06 crc kubenswrapper[4884]: E1128 17:27:06.199680 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="registry-server" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199687 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="registry-server" Nov 28 17:27:06 crc kubenswrapper[4884]: E1128 17:27:06.199713 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="extract-content" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199718 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="extract-content" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199904 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bc9bcb4-cbd4-4422-9722-16084405cf50" containerName="download-cache-openstack-openstack-cell1" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.199925 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b30114a8-f44b-4304-9599-7ad3f6273058" containerName="registry-server" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.200715 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.202887 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.203086 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.203300 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.203561 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.216501 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-b67pg"] Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.272850 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ceph\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.272912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt5wx\" (UniqueName: \"kubernetes.io/projected/a4bfa85c-2676-434e-bd4e-bf610fe32231-kube-api-access-jt5wx\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.272944 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ssh-key\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.272970 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-inventory\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.375252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ceph\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.375309 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt5wx\" (UniqueName: \"kubernetes.io/projected/a4bfa85c-2676-434e-bd4e-bf610fe32231-kube-api-access-jt5wx\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " 
pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.375340 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ssh-key\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.375361 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-inventory\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.379746 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-inventory\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.379846 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ceph\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.381126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ssh-key\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.419298 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt5wx\" (UniqueName: \"kubernetes.io/projected/a4bfa85c-2676-434e-bd4e-bf610fe32231-kube-api-access-jt5wx\") pod \"configure-network-openstack-openstack-cell1-b67pg\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") " pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:06 crc kubenswrapper[4884]: I1128 17:27:06.521591 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" Nov 28 17:27:07 crc kubenswrapper[4884]: I1128 17:27:07.143891 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-b67pg"] Nov 28 17:27:08 crc kubenswrapper[4884]: I1128 17:27:08.115579 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" event={"ID":"a4bfa85c-2676-434e-bd4e-bf610fe32231","Type":"ContainerStarted","Data":"a3ab6d6c0018b4d48b5f46e1130f7a7b5dad76aa69fd4f8280bd88b9cf5392c6"} Nov 28 17:27:08 crc kubenswrapper[4884]: I1128 17:27:08.116190 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" event={"ID":"a4bfa85c-2676-434e-bd4e-bf610fe32231","Type":"ContainerStarted","Data":"684a10f4b1930436a947a50c9f59a011c22873b0e5933c3f3de4e5cc709aef09"} Nov 28 17:27:08 crc kubenswrapper[4884]: I1128 17:27:08.138236 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" podStartSLOduration=1.500586134 podStartE2EDuration="2.138210562s" podCreationTimestamp="2025-11-28 17:27:06 +0000 UTC" firstStartedPulling="2025-11-28 17:27:07.151179884 +0000 UTC m=+7666.713963675" lastFinishedPulling="2025-11-28 17:27:07.788804272 +0000 UTC m=+7667.351588103" observedRunningTime="2025-11-28 17:27:08.132860181 +0000 UTC m=+7667.695643972" watchObservedRunningTime="2025-11-28 17:27:08.138210562 +0000 UTC m=+7667.700994363" Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.119521 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j24qw"] Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.124128 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.137534 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j24qw"]
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.191040 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b43cc6bf-3711-4f18-aa8e-881c9e48df55-catalog-content\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.191126 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b43cc6bf-3711-4f18-aa8e-881c9e48df55-utilities\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.191274 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srg9j\" (UniqueName: \"kubernetes.io/projected/b43cc6bf-3711-4f18-aa8e-881c9e48df55-kube-api-access-srg9j\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.242720 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.243028 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.292918 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srg9j\" (UniqueName: \"kubernetes.io/projected/b43cc6bf-3711-4f18-aa8e-881c9e48df55-kube-api-access-srg9j\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.293131 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b43cc6bf-3711-4f18-aa8e-881c9e48df55-catalog-content\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.293159 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b43cc6bf-3711-4f18-aa8e-881c9e48df55-utilities\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.293654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b43cc6bf-3711-4f18-aa8e-881c9e48df55-utilities\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.293844 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b43cc6bf-3711-4f18-aa8e-881c9e48df55-catalog-content\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.316247 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srg9j\" (UniqueName: \"kubernetes.io/projected/b43cc6bf-3711-4f18-aa8e-881c9e48df55-kube-api-access-srg9j\") pod \"certified-operators-j24qw\" (UID: \"b43cc6bf-3711-4f18-aa8e-881c9e48df55\") " pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:21 crc kubenswrapper[4884]: I1128 17:27:21.445901 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:22 crc kubenswrapper[4884]: I1128 17:27:22.062911 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j24qw"]
Nov 28 17:27:22 crc kubenswrapper[4884]: W1128 17:27:22.077240 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb43cc6bf_3711_4f18_aa8e_881c9e48df55.slice/crio-d3fb7a638453228fa8eec1dcdf347f8e4a4d93eb84cbd4a74e6f4bd05d3b22b4 WatchSource:0}: Error finding container d3fb7a638453228fa8eec1dcdf347f8e4a4d93eb84cbd4a74e6f4bd05d3b22b4: Status 404 returned error can't find the container with id d3fb7a638453228fa8eec1dcdf347f8e4a4d93eb84cbd4a74e6f4bd05d3b22b4
Nov 28 17:27:22 crc kubenswrapper[4884]: I1128 17:27:22.248900 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24qw" event={"ID":"b43cc6bf-3711-4f18-aa8e-881c9e48df55","Type":"ContainerStarted","Data":"d3fb7a638453228fa8eec1dcdf347f8e4a4d93eb84cbd4a74e6f4bd05d3b22b4"}
Nov 28 17:27:23 crc kubenswrapper[4884]: I1128 17:27:23.258197 4884 generic.go:334] "Generic (PLEG): container finished" podID="b43cc6bf-3711-4f18-aa8e-881c9e48df55" containerID="d24fa54a31fffad517196a0ac83c411b747eb6e737863716d86e1da1fb271d46" exitCode=0
Nov 28 17:27:23 crc kubenswrapper[4884]: I1128 17:27:23.258294 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24qw" event={"ID":"b43cc6bf-3711-4f18-aa8e-881c9e48df55","Type":"ContainerDied","Data":"d24fa54a31fffad517196a0ac83c411b747eb6e737863716d86e1da1fb271d46"}
Nov 28 17:27:28 crc kubenswrapper[4884]: I1128 17:27:28.323401 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24qw" event={"ID":"b43cc6bf-3711-4f18-aa8e-881c9e48df55","Type":"ContainerStarted","Data":"47d0fb8f73a8792d573e19844ed706233de276cbc4d1fa6963c08b5d4892bbec"}
Nov 28 17:27:29 crc kubenswrapper[4884]: I1128 17:27:29.334264 4884 generic.go:334] "Generic (PLEG): container finished" podID="b43cc6bf-3711-4f18-aa8e-881c9e48df55" containerID="47d0fb8f73a8792d573e19844ed706233de276cbc4d1fa6963c08b5d4892bbec" exitCode=0
Nov 28 17:27:29 crc kubenswrapper[4884]: I1128 17:27:29.334334 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24qw" event={"ID":"b43cc6bf-3711-4f18-aa8e-881c9e48df55","Type":"ContainerDied","Data":"47d0fb8f73a8792d573e19844ed706233de276cbc4d1fa6963c08b5d4892bbec"}
Nov 28 17:27:30 crc kubenswrapper[4884]: I1128 17:27:30.374011 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j24qw" podStartSLOduration=2.5217934250000003 podStartE2EDuration="9.373991177s" podCreationTimestamp="2025-11-28 17:27:21 +0000 UTC" firstStartedPulling="2025-11-28 17:27:23.259994318 +0000 UTC m=+7682.822778119" lastFinishedPulling="2025-11-28 17:27:30.11219207 +0000 UTC m=+7689.674975871" observedRunningTime="2025-11-28 17:27:30.368991944 +0000 UTC m=+7689.931775755" watchObservedRunningTime="2025-11-28 17:27:30.373991177 +0000 UTC m=+7689.936774978"
Nov 28 17:27:31 crc kubenswrapper[4884]: I1128 17:27:31.367070 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24qw" event={"ID":"b43cc6bf-3711-4f18-aa8e-881c9e48df55","Type":"ContainerStarted","Data":"18d09bcd6c2e72834c8093e40b8d29ae7a2c4a8369e5883537cc54e8486d572b"}
Nov 28 17:27:31 crc kubenswrapper[4884]: I1128 17:27:31.446082 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:31 crc kubenswrapper[4884]: I1128 17:27:31.446197 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:32 crc kubenswrapper[4884]: I1128 17:27:32.507246 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-j24qw" podUID="b43cc6bf-3711-4f18-aa8e-881c9e48df55" containerName="registry-server" probeResult="failure" output=<
Nov 28 17:27:32 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s
Nov 28 17:27:32 crc kubenswrapper[4884]: >
Nov 28 17:27:41 crc kubenswrapper[4884]: I1128 17:27:41.514581 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:41 crc kubenswrapper[4884]: I1128 17:27:41.564427 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j24qw"
Nov 28 17:27:41 crc kubenswrapper[4884]: I1128 17:27:41.627306 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j24qw"]
Nov 28 17:27:41 crc kubenswrapper[4884]: I1128 17:27:41.759348 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4vdxb"]
Nov 28 17:27:41 crc kubenswrapper[4884]: I1128 17:27:41.759641 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4vdxb" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="registry-server" containerID="cri-o://403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62" gracePeriod=2
Nov 28 17:27:41 crc kubenswrapper[4884]: E1128 17:27:41.842423 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62 is running failed: container process not found" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62" cmd=["grpc_health_probe","-addr=:50051"]
Nov 28 17:27:41 crc kubenswrapper[4884]: E1128 17:27:41.842827 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62 is running failed: container process not found" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62" cmd=["grpc_health_probe","-addr=:50051"]
Nov 28 17:27:41 crc kubenswrapper[4884]: E1128 17:27:41.843077 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62 is running failed: container process not found" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62" cmd=["grpc_health_probe","-addr=:50051"]
Nov 28 17:27:41 crc kubenswrapper[4884]: E1128 17:27:41.843119 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-4vdxb" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="registry-server"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.307713 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vdxb"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.472075 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-utilities\") pod \"61968083-3a85-48df-8adb-727b32a9720d\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") "
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.472242 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-catalog-content\") pod \"61968083-3a85-48df-8adb-727b32a9720d\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") "
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.472291 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpz6x\" (UniqueName: \"kubernetes.io/projected/61968083-3a85-48df-8adb-727b32a9720d-kube-api-access-dpz6x\") pod \"61968083-3a85-48df-8adb-727b32a9720d\" (UID: \"61968083-3a85-48df-8adb-727b32a9720d\") "
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.481928 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-utilities" (OuterVolumeSpecName: "utilities") pod "61968083-3a85-48df-8adb-727b32a9720d" (UID: "61968083-3a85-48df-8adb-727b32a9720d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.495628 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61968083-3a85-48df-8adb-727b32a9720d-kube-api-access-dpz6x" (OuterVolumeSpecName: "kube-api-access-dpz6x") pod "61968083-3a85-48df-8adb-727b32a9720d" (UID: "61968083-3a85-48df-8adb-727b32a9720d"). InnerVolumeSpecName "kube-api-access-dpz6x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.500463 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerDied","Data":"403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62"}
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.500507 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4vdxb"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.500534 4884 scope.go:117] "RemoveContainer" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.500423 4884 generic.go:334] "Generic (PLEG): container finished" podID="61968083-3a85-48df-8adb-727b32a9720d" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62" exitCode=0
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.500633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4vdxb" event={"ID":"61968083-3a85-48df-8adb-727b32a9720d","Type":"ContainerDied","Data":"a5cbba13ca4f45aa15dfbafc3071e82649d431ed3e9ad6150b8f75a118d801fc"}
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.575006 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.575381 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpz6x\" (UniqueName: \"kubernetes.io/projected/61968083-3a85-48df-8adb-727b32a9720d-kube-api-access-dpz6x\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.662528 4884 scope.go:117] "RemoveContainer" containerID="0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.685205 4884 scope.go:117] "RemoveContainer" containerID="daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.842754 4884 scope.go:117] "RemoveContainer" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62"
Nov 28 17:27:42 crc kubenswrapper[4884]: E1128 17:27:42.843409 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62\": container with ID starting with 403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62 not found: ID does not exist" containerID="403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.843441 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62"} err="failed to get container status \"403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62\": rpc error: code = NotFound desc = could not find container \"403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62\": container with ID starting with 403e9932d708cfe1fcea0ade30ebeb3d6c95703448e7e34c0a42d1ae27918b62 not found: ID does not exist"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.843467 4884 scope.go:117] "RemoveContainer" containerID="0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf"
Nov 28 17:27:42 crc kubenswrapper[4884]: E1128 17:27:42.843692 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf\": container with ID starting with 0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf not found: ID does not exist" containerID="0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.843714 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf"} err="failed to get container status \"0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf\": rpc error: code = NotFound desc = could not find container \"0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf\": container with ID starting with 0c2a70b203b403504679012d96b763f749aaf2b539644a75ceaf494781cbfabf not found: ID does not exist"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.843734 4884 scope.go:117] "RemoveContainer" containerID="daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e"
Nov 28 17:27:42 crc kubenswrapper[4884]: E1128 17:27:42.843953 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e\": container with ID starting with daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e not found: ID does not exist" containerID="daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.843971 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e"} err="failed to get container status \"daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e\": rpc error: code = NotFound desc = could not find container \"daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e\": container with ID starting with daa7d39f4f62dad974025056262410a0072bb2bbb9852ff11e675024f6e7560e not found: ID does not exist"
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.864038 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61968083-3a85-48df-8adb-727b32a9720d" (UID: "61968083-3a85-48df-8adb-727b32a9720d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:27:42 crc kubenswrapper[4884]: I1128 17:27:42.881295 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61968083-3a85-48df-8adb-727b32a9720d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:27:43 crc kubenswrapper[4884]: I1128 17:27:43.137123 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4vdxb"]
Nov 28 17:27:43 crc kubenswrapper[4884]: I1128 17:27:43.146677 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4vdxb"]
Nov 28 17:27:44 crc kubenswrapper[4884]: I1128 17:27:44.699681 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61968083-3a85-48df-8adb-727b32a9720d" path="/var/lib/kubelet/pods/61968083-3a85-48df-8adb-727b32a9720d/volumes"
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.243499 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.244060 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.244130 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.244893 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"030beb3bce746039696177e34d1f5f166fcd801c3a5570e3e24b4dd09a6b5c88"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.244955 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://030beb3bce746039696177e34d1f5f166fcd801c3a5570e3e24b4dd09a6b5c88" gracePeriod=600
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.610897 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="030beb3bce746039696177e34d1f5f166fcd801c3a5570e3e24b4dd09a6b5c88" exitCode=0
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.610933 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"030beb3bce746039696177e34d1f5f166fcd801c3a5570e3e24b4dd09a6b5c88"}
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.611448 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53"}
Nov 28 17:27:51 crc kubenswrapper[4884]: I1128 17:27:51.611495 4884 scope.go:117] "RemoveContainer" containerID="bfe1b359e4d7188d2fca0ffd0b18a44239571a072b24a4335ecfec23f467c9df"
Nov 28 17:28:29 crc kubenswrapper[4884]: I1128 17:28:29.982763 4884 generic.go:334] "Generic (PLEG): container finished" podID="a4bfa85c-2676-434e-bd4e-bf610fe32231" containerID="a3ab6d6c0018b4d48b5f46e1130f7a7b5dad76aa69fd4f8280bd88b9cf5392c6" exitCode=0
Nov 28 17:28:29 crc kubenswrapper[4884]: I1128 17:28:29.982872 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" event={"ID":"a4bfa85c-2676-434e-bd4e-bf610fe32231","Type":"ContainerDied","Data":"a3ab6d6c0018b4d48b5f46e1130f7a7b5dad76aa69fd4f8280bd88b9cf5392c6"}
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.449014 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-b67pg"
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.623500 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jt5wx\" (UniqueName: \"kubernetes.io/projected/a4bfa85c-2676-434e-bd4e-bf610fe32231-kube-api-access-jt5wx\") pod \"a4bfa85c-2676-434e-bd4e-bf610fe32231\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") "
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.623613 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-inventory\") pod \"a4bfa85c-2676-434e-bd4e-bf610fe32231\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") "
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.623678 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ceph\") pod \"a4bfa85c-2676-434e-bd4e-bf610fe32231\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") "
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.623798 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ssh-key\") pod \"a4bfa85c-2676-434e-bd4e-bf610fe32231\" (UID: \"a4bfa85c-2676-434e-bd4e-bf610fe32231\") "
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.634126 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ceph" (OuterVolumeSpecName: "ceph") pod "a4bfa85c-2676-434e-bd4e-bf610fe32231" (UID: "a4bfa85c-2676-434e-bd4e-bf610fe32231"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.634721 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4bfa85c-2676-434e-bd4e-bf610fe32231-kube-api-access-jt5wx" (OuterVolumeSpecName: "kube-api-access-jt5wx") pod "a4bfa85c-2676-434e-bd4e-bf610fe32231" (UID: "a4bfa85c-2676-434e-bd4e-bf610fe32231"). InnerVolumeSpecName "kube-api-access-jt5wx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.663447 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a4bfa85c-2676-434e-bd4e-bf610fe32231" (UID: "a4bfa85c-2676-434e-bd4e-bf610fe32231"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.672619 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-inventory" (OuterVolumeSpecName: "inventory") pod "a4bfa85c-2676-434e-bd4e-bf610fe32231" (UID: "a4bfa85c-2676-434e-bd4e-bf610fe32231"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.727742 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jt5wx\" (UniqueName: \"kubernetes.io/projected/a4bfa85c-2676-434e-bd4e-bf610fe32231-kube-api-access-jt5wx\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.727786 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.727810 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:31 crc kubenswrapper[4884]: I1128 17:28:31.727828 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a4bfa85c-2676-434e-bd4e-bf610fe32231-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.004179 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-b67pg" event={"ID":"a4bfa85c-2676-434e-bd4e-bf610fe32231","Type":"ContainerDied","Data":"684a10f4b1930436a947a50c9f59a011c22873b0e5933c3f3de4e5cc709aef09"}
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.004643 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="684a10f4b1930436a947a50c9f59a011c22873b0e5933c3f3de4e5cc709aef09"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.004219 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-b67pg"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.084423 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-sb4x9"]
Nov 28 17:28:32 crc kubenswrapper[4884]: E1128 17:28:32.084982 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="registry-server"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.085000 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="registry-server"
Nov 28 17:28:32 crc kubenswrapper[4884]: E1128 17:28:32.085016 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="extract-content"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.085023 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="extract-content"
Nov 28 17:28:32 crc kubenswrapper[4884]: E1128 17:28:32.085036 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="extract-utilities"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.085042 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="extract-utilities"
Nov 28 17:28:32 crc kubenswrapper[4884]: E1128 17:28:32.085065 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bfa85c-2676-434e-bd4e-bf610fe32231" containerName="configure-network-openstack-openstack-cell1"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.085073 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bfa85c-2676-434e-bd4e-bf610fe32231" containerName="configure-network-openstack-openstack-cell1"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.085322 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="61968083-3a85-48df-8adb-727b32a9720d" containerName="registry-server"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.085341 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bfa85c-2676-434e-bd4e-bf610fe32231" containerName="configure-network-openstack-openstack-cell1"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.086106 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.089135 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.089647 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.090049 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.091097 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.097206 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-sb4x9"]
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.237570 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ceph\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.237678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jglrp\" (UniqueName: \"kubernetes.io/projected/9e362df8-70ef-4d2a-b224-fb3dd8c05732-kube-api-access-jglrp\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.237746 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-inventory\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.237785 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ssh-key\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.340449 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ceph\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.340805 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jglrp\" (UniqueName: \"kubernetes.io/projected/9e362df8-70ef-4d2a-b224-fb3dd8c05732-kube-api-access-jglrp\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.340850 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-inventory\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.340886 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ssh-key\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.345294 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-inventory\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.345596 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ceph\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.345740 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ssh-key\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.359617 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jglrp\" (UniqueName: \"kubernetes.io/projected/9e362df8-70ef-4d2a-b224-fb3dd8c05732-kube-api-access-jglrp\") pod \"validate-network-openstack-openstack-cell1-sb4x9\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") " pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.410630 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.936229 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-sb4x9"]
Nov 28 17:28:32 crc kubenswrapper[4884]: I1128 17:28:32.936393 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 17:28:33 crc kubenswrapper[4884]: I1128 17:28:33.014426 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9" event={"ID":"9e362df8-70ef-4d2a-b224-fb3dd8c05732","Type":"ContainerStarted","Data":"809077d4200bae450dd7c86bacf2ad3ded514c27884fce829a9d29c1318c3423"}
Nov 28 17:28:34 crc kubenswrapper[4884]: I1128 17:28:34.027188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9" event={"ID":"9e362df8-70ef-4d2a-b224-fb3dd8c05732","Type":"ContainerStarted","Data":"3561955dcb2b1e6118e18928ed5a7a59141176a60d36acf23ce4b295cadf7725"}
Nov 28 17:28:34 crc kubenswrapper[4884]: I1128 17:28:34.055047 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9" podStartSLOduration=1.460997344 podStartE2EDuration="2.055023669s" podCreationTimestamp="2025-11-28 17:28:32 +0000 UTC" firstStartedPulling="2025-11-28 17:28:32.93615402 +0000 UTC m=+7752.498937821" lastFinishedPulling="2025-11-28 17:28:33.530180345 +0000 UTC m=+7753.092964146" observedRunningTime="2025-11-28 17:28:34.048564131 +0000 UTC m=+7753.611347942" watchObservedRunningTime="2025-11-28 17:28:34.055023669 +0000 UTC m=+7753.617807480"
Nov 28 17:28:39 crc kubenswrapper[4884]: I1128 17:28:39.081246 4884 generic.go:334] "Generic (PLEG): container finished" podID="9e362df8-70ef-4d2a-b224-fb3dd8c05732" containerID="3561955dcb2b1e6118e18928ed5a7a59141176a60d36acf23ce4b295cadf7725" exitCode=0
Nov 28 17:28:39 crc kubenswrapper[4884]: I1128 17:28:39.082201 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9" event={"ID":"9e362df8-70ef-4d2a-b224-fb3dd8c05732","Type":"ContainerDied","Data":"3561955dcb2b1e6118e18928ed5a7a59141176a60d36acf23ce4b295cadf7725"}
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.605440 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.744024 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-inventory\") pod \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") "
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.744397 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jglrp\" (UniqueName: \"kubernetes.io/projected/9e362df8-70ef-4d2a-b224-fb3dd8c05732-kube-api-access-jglrp\") pod \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") "
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.744533 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ssh-key\") pod \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") "
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.744664 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ceph\") pod \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\" (UID: \"9e362df8-70ef-4d2a-b224-fb3dd8c05732\") "
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.751278 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e362df8-70ef-4d2a-b224-fb3dd8c05732-kube-api-access-jglrp" (OuterVolumeSpecName: "kube-api-access-jglrp") pod "9e362df8-70ef-4d2a-b224-fb3dd8c05732" (UID: "9e362df8-70ef-4d2a-b224-fb3dd8c05732"). InnerVolumeSpecName "kube-api-access-jglrp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.752271 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ceph" (OuterVolumeSpecName: "ceph") pod "9e362df8-70ef-4d2a-b224-fb3dd8c05732" (UID: "9e362df8-70ef-4d2a-b224-fb3dd8c05732"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.782573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-inventory" (OuterVolumeSpecName: "inventory") pod "9e362df8-70ef-4d2a-b224-fb3dd8c05732" (UID: "9e362df8-70ef-4d2a-b224-fb3dd8c05732"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.787287 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9e362df8-70ef-4d2a-b224-fb3dd8c05732" (UID: "9e362df8-70ef-4d2a-b224-fb3dd8c05732"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.853163 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.853209 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jglrp\" (UniqueName: \"kubernetes.io/projected/9e362df8-70ef-4d2a-b224-fb3dd8c05732-kube-api-access-jglrp\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.853223 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:40 crc kubenswrapper[4884]: I1128 17:28:40.853234 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9e362df8-70ef-4d2a-b224-fb3dd8c05732-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.106984 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9" event={"ID":"9e362df8-70ef-4d2a-b224-fb3dd8c05732","Type":"ContainerDied","Data":"809077d4200bae450dd7c86bacf2ad3ded514c27884fce829a9d29c1318c3423"}
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.107063 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="809077d4200bae450dd7c86bacf2ad3ded514c27884fce829a9d29c1318c3423"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.107107 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-sb4x9"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.182014 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-fcw4g"]
Nov 28 17:28:41 crc kubenswrapper[4884]: E1128 17:28:41.182714 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e362df8-70ef-4d2a-b224-fb3dd8c05732" containerName="validate-network-openstack-openstack-cell1"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.182739 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e362df8-70ef-4d2a-b224-fb3dd8c05732" containerName="validate-network-openstack-openstack-cell1"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.183025 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e362df8-70ef-4d2a-b224-fb3dd8c05732" containerName="validate-network-openstack-openstack-cell1"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.184138 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.186479 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.186671 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.186850 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.187609 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.193902 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-fcw4g"]
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.260831 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mf52k\" (UniqueName: \"kubernetes.io/projected/51790cc2-00ff-4f07-9231-515e37777c81-kube-api-access-mf52k\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.260943 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-inventory\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.260988 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ssh-key\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.261333 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ceph\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.365570 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ceph\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.366415 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mf52k\" (UniqueName: \"kubernetes.io/projected/51790cc2-00ff-4f07-9231-515e37777c81-kube-api-access-mf52k\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.366648 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-inventory\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.366866 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ssh-key\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.370987 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ssh-key\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.370996 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ceph\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.371793 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-inventory\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.388565 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mf52k\" (UniqueName: \"kubernetes.io/projected/51790cc2-00ff-4f07-9231-515e37777c81-kube-api-access-mf52k\") pod \"install-os-openstack-openstack-cell1-fcw4g\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") " pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:41 crc kubenswrapper[4884]: I1128 17:28:41.507043 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:28:42 crc kubenswrapper[4884]: I1128 17:28:42.071976 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-fcw4g"]
Nov 28 17:28:42 crc kubenswrapper[4884]: I1128 17:28:42.119267 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-fcw4g" event={"ID":"51790cc2-00ff-4f07-9231-515e37777c81","Type":"ContainerStarted","Data":"e4039146232c4bb32f8421be4b55d7bbacc4e475af08dfa38ec8f1c1af9f1190"}
Nov 28 17:28:43 crc kubenswrapper[4884]: I1128 17:28:43.129661 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-fcw4g" event={"ID":"51790cc2-00ff-4f07-9231-515e37777c81","Type":"ContainerStarted","Data":"1a7e0424499250f3e5649af64befdcbfa6701a2171148229ab3aba875631982e"}
Nov 28 17:28:43 crc kubenswrapper[4884]: I1128 17:28:43.148594 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-fcw4g" podStartSLOduration=1.5595170779999998 podStartE2EDuration="2.148577531s" podCreationTimestamp="2025-11-28 17:28:41 +0000 UTC" firstStartedPulling="2025-11-28 17:28:42.083459513 +0000 UTC m=+7761.646243314" lastFinishedPulling="2025-11-28 17:28:42.672519966 +0000 UTC m=+7762.235303767" observedRunningTime="2025-11-28 17:28:43.146327475 +0000 UTC m=+7762.709111286" watchObservedRunningTime="2025-11-28 17:28:43.148577531 +0000 UTC m=+7762.711361332"
Nov 28 17:29:29 crc kubenswrapper[4884]: I1128 17:29:29.615239 4884 generic.go:334] "Generic (PLEG): container finished" podID="51790cc2-00ff-4f07-9231-515e37777c81" containerID="1a7e0424499250f3e5649af64befdcbfa6701a2171148229ab3aba875631982e" exitCode=0
Nov 28 17:29:29 crc kubenswrapper[4884]: I1128 17:29:29.615387 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-fcw4g" event={"ID":"51790cc2-00ff-4f07-9231-515e37777c81","Type":"ContainerDied","Data":"1a7e0424499250f3e5649af64befdcbfa6701a2171148229ab3aba875631982e"}
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.086495 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.159651 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ceph\") pod \"51790cc2-00ff-4f07-9231-515e37777c81\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") "
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.159700 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ssh-key\") pod \"51790cc2-00ff-4f07-9231-515e37777c81\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") "
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.159834 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-inventory\") pod \"51790cc2-00ff-4f07-9231-515e37777c81\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") "
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.159861 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mf52k\" (UniqueName: \"kubernetes.io/projected/51790cc2-00ff-4f07-9231-515e37777c81-kube-api-access-mf52k\") pod \"51790cc2-00ff-4f07-9231-515e37777c81\" (UID: \"51790cc2-00ff-4f07-9231-515e37777c81\") "
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.165557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ceph" (OuterVolumeSpecName: "ceph") pod "51790cc2-00ff-4f07-9231-515e37777c81" (UID: "51790cc2-00ff-4f07-9231-515e37777c81"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.167411 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51790cc2-00ff-4f07-9231-515e37777c81-kube-api-access-mf52k" (OuterVolumeSpecName: "kube-api-access-mf52k") pod "51790cc2-00ff-4f07-9231-515e37777c81" (UID: "51790cc2-00ff-4f07-9231-515e37777c81"). InnerVolumeSpecName "kube-api-access-mf52k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.188132 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-inventory" (OuterVolumeSpecName: "inventory") pod "51790cc2-00ff-4f07-9231-515e37777c81" (UID: "51790cc2-00ff-4f07-9231-515e37777c81"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.188605 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "51790cc2-00ff-4f07-9231-515e37777c81" (UID: "51790cc2-00ff-4f07-9231-515e37777c81"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.263129 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.263160 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.263169 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/51790cc2-00ff-4f07-9231-515e37777c81-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.263178 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mf52k\" (UniqueName: \"kubernetes.io/projected/51790cc2-00ff-4f07-9231-515e37777c81-kube-api-access-mf52k\") on node \"crc\" DevicePath \"\""
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.641216 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-fcw4g" event={"ID":"51790cc2-00ff-4f07-9231-515e37777c81","Type":"ContainerDied","Data":"e4039146232c4bb32f8421be4b55d7bbacc4e475af08dfa38ec8f1c1af9f1190"}
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.641614 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4039146232c4bb32f8421be4b55d7bbacc4e475af08dfa38ec8f1c1af9f1190"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.641359 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-fcw4g"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.723450 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-z74tb"]
Nov 28 17:29:31 crc kubenswrapper[4884]: E1128 17:29:31.724216 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51790cc2-00ff-4f07-9231-515e37777c81" containerName="install-os-openstack-openstack-cell1"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.724319 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="51790cc2-00ff-4f07-9231-515e37777c81" containerName="install-os-openstack-openstack-cell1"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.724665 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="51790cc2-00ff-4f07-9231-515e37777c81" containerName="install-os-openstack-openstack-cell1"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.725803 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.731464 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.731991 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.732155 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.732501 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.736118 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-z74tb"]
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.785434 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-inventory\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.785512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ssh-key\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.785560 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km9h8\" (UniqueName: \"kubernetes.io/projected/9671879e-8915-469b-882c-b25003ce0d21-kube-api-access-km9h8\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.785608 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ceph\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.887627 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ceph\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.887877 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-inventory\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.887936 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ssh-key\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.887985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km9h8\" (UniqueName: \"kubernetes.io/projected/9671879e-8915-469b-882c-b25003ce0d21-kube-api-access-km9h8\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.891934 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ceph\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.891945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-inventory\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.896577 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ssh-key\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:31 crc kubenswrapper[4884]: I1128 17:29:31.903907 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km9h8\" (UniqueName: \"kubernetes.io/projected/9671879e-8915-469b-882c-b25003ce0d21-kube-api-access-km9h8\") pod \"configure-os-openstack-openstack-cell1-z74tb\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:32 crc kubenswrapper[4884]: I1128 17:29:32.046681 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z74tb"
Nov 28 17:29:33 crc kubenswrapper[4884]: I1128 17:29:32.751463 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-z74tb"]
Nov 28 17:29:33 crc kubenswrapper[4884]: W1128 17:29:32.751845 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9671879e_8915_469b_882c_b25003ce0d21.slice/crio-af99f68a1f1e15cb62b61b73ab148bf622b5f64b1d2d45e9cc4d02678fdc39f9 WatchSource:0}: Error finding container af99f68a1f1e15cb62b61b73ab148bf622b5f64b1d2d45e9cc4d02678fdc39f9: Status 404 returned error can't find the container with id af99f68a1f1e15cb62b61b73ab148bf622b5f64b1d2d45e9cc4d02678fdc39f9
Nov 28 17:29:33 crc kubenswrapper[4884]: I1128 17:29:33.685170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" event={"ID":"9671879e-8915-469b-882c-b25003ce0d21","Type":"ContainerStarted","Data":"ed5ad1cb0d104ff96b2925b69d7174314e66a099ba2f52ab1077e3f89b5ecbca"}
Nov 28 17:29:33 crc kubenswrapper[4884]: I1128 17:29:33.685521 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" event={"ID":"9671879e-8915-469b-882c-b25003ce0d21","Type":"ContainerStarted","Data":"af99f68a1f1e15cb62b61b73ab148bf622b5f64b1d2d45e9cc4d02678fdc39f9"}
Nov 28 17:29:33 crc kubenswrapper[4884]: I1128 17:29:33.711039 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" podStartSLOduration=2.206657717 podStartE2EDuration="2.711019587s" podCreationTimestamp="2025-11-28 17:29:31 +0000 UTC" firstStartedPulling="2025-11-28 17:29:32.754133501 +0000 UTC m=+7812.316917302" lastFinishedPulling="2025-11-28 17:29:33.258495371 +0000 UTC m=+7812.821279172" observedRunningTime="2025-11-28 17:29:33.699966766 +0000 UTC m=+7813.262750567" watchObservedRunningTime="2025-11-28 17:29:33.711019587 +0000 UTC m=+7813.273803378"
Nov 28 17:29:51 crc kubenswrapper[4884]: I1128 17:29:51.243809 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 17:29:51 crc kubenswrapper[4884]: I1128 17:29:51.244473 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.162435 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v"]
Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.165457 4884 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.168636 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.169001 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.180404 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v"] Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.254313 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qcsf\" (UniqueName: \"kubernetes.io/projected/48016531-4bcd-4141-b403-f8c563192ce7-kube-api-access-6qcsf\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.254404 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48016531-4bcd-4141-b403-f8c563192ce7-config-volume\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.254480 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48016531-4bcd-4141-b403-f8c563192ce7-secret-volume\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.356810 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qcsf\" (UniqueName: \"kubernetes.io/projected/48016531-4bcd-4141-b403-f8c563192ce7-kube-api-access-6qcsf\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.356887 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48016531-4bcd-4141-b403-f8c563192ce7-config-volume\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.356948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48016531-4bcd-4141-b403-f8c563192ce7-secret-volume\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.357775 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48016531-4bcd-4141-b403-f8c563192ce7-config-volume\") pod 
\"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.362829 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48016531-4bcd-4141-b403-f8c563192ce7-secret-volume\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.374877 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qcsf\" (UniqueName: \"kubernetes.io/projected/48016531-4bcd-4141-b403-f8c563192ce7-kube-api-access-6qcsf\") pod \"collect-profiles-29405850-pvf9v\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.495538 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.955468 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v"] Nov 28 17:30:00 crc kubenswrapper[4884]: I1128 17:30:00.974230 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" event={"ID":"48016531-4bcd-4141-b403-f8c563192ce7","Type":"ContainerStarted","Data":"c4b4626adfa9cb2f0db9869561b0067025ea594254464f41277f197d4bb498ef"} Nov 28 17:30:01 crc kubenswrapper[4884]: I1128 17:30:01.986563 4884 generic.go:334] "Generic (PLEG): container finished" podID="48016531-4bcd-4141-b403-f8c563192ce7" containerID="2e571cccf3b0f63ed720de15839bcb9522e76141f7cfbf56caffba22d4178449" exitCode=0 Nov 28 17:30:01 crc kubenswrapper[4884]: I1128 17:30:01.986638 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" event={"ID":"48016531-4bcd-4141-b403-f8c563192ce7","Type":"ContainerDied","Data":"2e571cccf3b0f63ed720de15839bcb9522e76141f7cfbf56caffba22d4178449"} Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.371999 4884 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.421602 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48016531-4bcd-4141-b403-f8c563192ce7-secret-volume\") pod \"48016531-4bcd-4141-b403-f8c563192ce7\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") "
Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.421754 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48016531-4bcd-4141-b403-f8c563192ce7-config-volume\") pod \"48016531-4bcd-4141-b403-f8c563192ce7\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") "
Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.422074 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qcsf\" (UniqueName: \"kubernetes.io/projected/48016531-4bcd-4141-b403-f8c563192ce7-kube-api-access-6qcsf\") pod \"48016531-4bcd-4141-b403-f8c563192ce7\" (UID: \"48016531-4bcd-4141-b403-f8c563192ce7\") "
Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.422658 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48016531-4bcd-4141-b403-f8c563192ce7-config-volume" (OuterVolumeSpecName: "config-volume") pod "48016531-4bcd-4141-b403-f8c563192ce7" (UID: "48016531-4bcd-4141-b403-f8c563192ce7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.427958 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48016531-4bcd-4141-b403-f8c563192ce7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "48016531-4bcd-4141-b403-f8c563192ce7" (UID: "48016531-4bcd-4141-b403-f8c563192ce7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.429302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48016531-4bcd-4141-b403-f8c563192ce7-kube-api-access-6qcsf" (OuterVolumeSpecName: "kube-api-access-6qcsf") pod "48016531-4bcd-4141-b403-f8c563192ce7" (UID: "48016531-4bcd-4141-b403-f8c563192ce7"). InnerVolumeSpecName "kube-api-access-6qcsf".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.524511 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/48016531-4bcd-4141-b403-f8c563192ce7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.524567 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qcsf\" (UniqueName: \"kubernetes.io/projected/48016531-4bcd-4141-b403-f8c563192ce7-kube-api-access-6qcsf\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:03 crc kubenswrapper[4884]: I1128 17:30:03.524583 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/48016531-4bcd-4141-b403-f8c563192ce7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:04 crc kubenswrapper[4884]: I1128 17:30:04.012076 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" event={"ID":"48016531-4bcd-4141-b403-f8c563192ce7","Type":"ContainerDied","Data":"c4b4626adfa9cb2f0db9869561b0067025ea594254464f41277f197d4bb498ef"} Nov 28 17:30:04 crc kubenswrapper[4884]: I1128 17:30:04.012430 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4b4626adfa9cb2f0db9869561b0067025ea594254464f41277f197d4bb498ef" Nov 28 17:30:04 crc kubenswrapper[4884]: I1128 17:30:04.012163 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v" Nov 28 17:30:04 crc kubenswrapper[4884]: I1128 17:30:04.454943 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64"] Nov 28 17:30:04 crc kubenswrapper[4884]: I1128 17:30:04.464159 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405805-xmw64"] Nov 28 17:30:04 crc kubenswrapper[4884]: I1128 17:30:04.707345 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6742bb97-7915-4d02-859d-472f0576c8d3" path="/var/lib/kubelet/pods/6742bb97-7915-4d02-859d-472f0576c8d3/volumes" Nov 28 17:30:18 crc kubenswrapper[4884]: I1128 17:30:18.153009 4884 generic.go:334] "Generic (PLEG): container finished" podID="9671879e-8915-469b-882c-b25003ce0d21" containerID="ed5ad1cb0d104ff96b2925b69d7174314e66a099ba2f52ab1077e3f89b5ecbca" exitCode=0 Nov 28 17:30:18 crc kubenswrapper[4884]: I1128 17:30:18.153657 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" event={"ID":"9671879e-8915-469b-882c-b25003ce0d21","Type":"ContainerDied","Data":"ed5ad1cb0d104ff96b2925b69d7174314e66a099ba2f52ab1077e3f89b5ecbca"} Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.612581 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.787606 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ceph\") pod \"9671879e-8915-469b-882c-b25003ce0d21\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.787677 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ssh-key\") pod \"9671879e-8915-469b-882c-b25003ce0d21\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.787764 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km9h8\" (UniqueName: \"kubernetes.io/projected/9671879e-8915-469b-882c-b25003ce0d21-kube-api-access-km9h8\") pod \"9671879e-8915-469b-882c-b25003ce0d21\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.788612 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-inventory\") pod \"9671879e-8915-469b-882c-b25003ce0d21\" (UID: \"9671879e-8915-469b-882c-b25003ce0d21\") " Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.795693 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ceph" (OuterVolumeSpecName: "ceph") pod "9671879e-8915-469b-882c-b25003ce0d21" (UID: "9671879e-8915-469b-882c-b25003ce0d21"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.796294 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9671879e-8915-469b-882c-b25003ce0d21-kube-api-access-km9h8" (OuterVolumeSpecName: "kube-api-access-km9h8") pod "9671879e-8915-469b-882c-b25003ce0d21" (UID: "9671879e-8915-469b-882c-b25003ce0d21"). InnerVolumeSpecName "kube-api-access-km9h8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.819188 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-inventory" (OuterVolumeSpecName: "inventory") pod "9671879e-8915-469b-882c-b25003ce0d21" (UID: "9671879e-8915-469b-882c-b25003ce0d21"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.825179 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9671879e-8915-469b-882c-b25003ce0d21" (UID: "9671879e-8915-469b-882c-b25003ce0d21"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.890989 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.891172 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.891231 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km9h8\" (UniqueName: \"kubernetes.io/projected/9671879e-8915-469b-882c-b25003ce0d21-kube-api-access-km9h8\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:19 crc kubenswrapper[4884]: I1128 17:30:19.891290 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9671879e-8915-469b-882c-b25003ce0d21-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.175999 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" event={"ID":"9671879e-8915-469b-882c-b25003ce0d21","Type":"ContainerDied","Data":"af99f68a1f1e15cb62b61b73ab148bf622b5f64b1d2d45e9cc4d02678fdc39f9"} Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.176055 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-z74tb" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.176063 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af99f68a1f1e15cb62b61b73ab148bf622b5f64b1d2d45e9cc4d02678fdc39f9" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.250473 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-hstkk"] Nov 28 17:30:20 crc kubenswrapper[4884]: E1128 17:30:20.251027 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9671879e-8915-469b-882c-b25003ce0d21" containerName="configure-os-openstack-openstack-cell1" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.251050 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9671879e-8915-469b-882c-b25003ce0d21" containerName="configure-os-openstack-openstack-cell1" Nov 28 17:30:20 crc kubenswrapper[4884]: E1128 17:30:20.251112 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48016531-4bcd-4141-b403-f8c563192ce7" containerName="collect-profiles" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.251121 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="48016531-4bcd-4141-b403-f8c563192ce7" containerName="collect-profiles" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.251383 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="48016531-4bcd-4141-b403-f8c563192ce7" containerName="collect-profiles" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.251421 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9671879e-8915-469b-882c-b25003ce0d21" containerName="configure-os-openstack-openstack-cell1" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.252376 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.254969 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.255038 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.255274 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.255311 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.259879 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-hstkk"] Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.299731 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.299826 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-inventory-0\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.299848 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tms4\" (UniqueName: \"kubernetes.io/projected/18dd9864-4a06-4605-812e-0e70eb1ede62-kube-api-access-7tms4\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.299922 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ceph\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.402252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-inventory-0\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.402310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tms4\" (UniqueName: \"kubernetes.io/projected/18dd9864-4a06-4605-812e-0e70eb1ede62-kube-api-access-7tms4\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.402438 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ceph\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.402583 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.406994 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-inventory-0\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.407027 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.407265 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ceph\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.422657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tms4\" (UniqueName: \"kubernetes.io/projected/18dd9864-4a06-4605-812e-0e70eb1ede62-kube-api-access-7tms4\") pod \"ssh-known-hosts-openstack-hstkk\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:20 crc kubenswrapper[4884]: I1128 17:30:20.573203 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:21 crc kubenswrapper[4884]: I1128 17:30:21.193890 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-hstkk"] Nov 28 17:30:21 crc kubenswrapper[4884]: I1128 17:30:21.243203 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:30:21 crc kubenswrapper[4884]: I1128 17:30:21.243266 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:30:21 crc kubenswrapper[4884]: I1128 17:30:21.661462 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:30:22 crc kubenswrapper[4884]: I1128 17:30:22.197599 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-hstkk" event={"ID":"18dd9864-4a06-4605-812e-0e70eb1ede62","Type":"ContainerStarted","Data":"72dd22ce5f2cf2fa5f77b71c271bd086c398a262f03df9bc79badd42e7ff23d5"} Nov 28 17:30:22 crc kubenswrapper[4884]: I1128 17:30:22.197901 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-hstkk" event={"ID":"18dd9864-4a06-4605-812e-0e70eb1ede62","Type":"ContainerStarted","Data":"60024276fee1a4f4ecd89766c5c6dace31fc5f8cc20568c91637b2a3db0f16c1"} Nov 28 17:30:22 crc kubenswrapper[4884]: I1128 17:30:22.223042 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-hstkk" podStartSLOduration=1.761258174 podStartE2EDuration="2.223022648s" podCreationTimestamp="2025-11-28 17:30:20 +0000 UTC" firstStartedPulling="2025-11-28 17:30:21.195843873 +0000 UTC m=+7860.758627674" lastFinishedPulling="2025-11-28 17:30:21.657608347 +0000 UTC m=+7861.220392148" observedRunningTime="2025-11-28 17:30:22.21131522 +0000 UTC m=+7861.774099021" watchObservedRunningTime="2025-11-28 17:30:22.223022648 +0000 UTC m=+7861.785806449" Nov 28 17:30:31 crc kubenswrapper[4884]: I1128 17:30:31.280989 4884 generic.go:334] "Generic (PLEG): container finished" podID="18dd9864-4a06-4605-812e-0e70eb1ede62" containerID="72dd22ce5f2cf2fa5f77b71c271bd086c398a262f03df9bc79badd42e7ff23d5" exitCode=0 Nov 28 17:30:31 crc kubenswrapper[4884]: I1128 17:30:31.281058 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-hstkk" event={"ID":"18dd9864-4a06-4605-812e-0e70eb1ede62","Type":"ContainerDied","Data":"72dd22ce5f2cf2fa5f77b71c271bd086c398a262f03df9bc79badd42e7ff23d5"} Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.745322 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.863579 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ssh-key-openstack-cell1\") pod \"18dd9864-4a06-4605-812e-0e70eb1ede62\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.863672 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ceph\") pod \"18dd9864-4a06-4605-812e-0e70eb1ede62\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.863834 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-inventory-0\") pod \"18dd9864-4a06-4605-812e-0e70eb1ede62\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.863885 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tms4\" (UniqueName: \"kubernetes.io/projected/18dd9864-4a06-4605-812e-0e70eb1ede62-kube-api-access-7tms4\") pod \"18dd9864-4a06-4605-812e-0e70eb1ede62\" (UID: \"18dd9864-4a06-4605-812e-0e70eb1ede62\") " Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.868182 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18dd9864-4a06-4605-812e-0e70eb1ede62-kube-api-access-7tms4" (OuterVolumeSpecName: "kube-api-access-7tms4") pod "18dd9864-4a06-4605-812e-0e70eb1ede62" (UID: "18dd9864-4a06-4605-812e-0e70eb1ede62"). InnerVolumeSpecName "kube-api-access-7tms4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.869621 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ceph" (OuterVolumeSpecName: "ceph") pod "18dd9864-4a06-4605-812e-0e70eb1ede62" (UID: "18dd9864-4a06-4605-812e-0e70eb1ede62"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.890678 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "18dd9864-4a06-4605-812e-0e70eb1ede62" (UID: "18dd9864-4a06-4605-812e-0e70eb1ede62"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.890675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "18dd9864-4a06-4605-812e-0e70eb1ede62" (UID: "18dd9864-4a06-4605-812e-0e70eb1ede62"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.966677 4884 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.966717 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tms4\" (UniqueName: \"kubernetes.io/projected/18dd9864-4a06-4605-812e-0e70eb1ede62-kube-api-access-7tms4\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.966749 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:32 crc kubenswrapper[4884]: I1128 17:30:32.966759 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/18dd9864-4a06-4605-812e-0e70eb1ede62-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.303574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-hstkk" event={"ID":"18dd9864-4a06-4605-812e-0e70eb1ede62","Type":"ContainerDied","Data":"60024276fee1a4f4ecd89766c5c6dace31fc5f8cc20568c91637b2a3db0f16c1"} Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.303686 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60024276fee1a4f4ecd89766c5c6dace31fc5f8cc20568c91637b2a3db0f16c1" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.303621 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-hstkk" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.385051 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-pxx8h"] Nov 28 17:30:33 crc kubenswrapper[4884]: E1128 17:30:33.385611 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18dd9864-4a06-4605-812e-0e70eb1ede62" containerName="ssh-known-hosts-openstack" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.385634 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="18dd9864-4a06-4605-812e-0e70eb1ede62" containerName="ssh-known-hosts-openstack" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.385880 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="18dd9864-4a06-4605-812e-0e70eb1ede62" containerName="ssh-known-hosts-openstack" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.386761 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.389108 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.391207 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.391264 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.391209 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.399737 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-pxx8h"] Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.478069 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n46b\" (UniqueName: \"kubernetes.io/projected/d303124f-49ec-447f-b8df-ff946aa08d58-kube-api-access-4n46b\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.478138 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-inventory\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.478182 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ceph\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.479024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ssh-key\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.581023 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ssh-key\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.581430 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n46b\" (UniqueName: \"kubernetes.io/projected/d303124f-49ec-447f-b8df-ff946aa08d58-kube-api-access-4n46b\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.581460 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-inventory\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.581498 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ceph\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.584902 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ceph\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.584992 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-inventory\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.586798 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ssh-key\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.600124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n46b\" (UniqueName: \"kubernetes.io/projected/d303124f-49ec-447f-b8df-ff946aa08d58-kube-api-access-4n46b\") pod \"run-os-openstack-openstack-cell1-pxx8h\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") " pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:33 crc kubenswrapper[4884]: I1128 17:30:33.714330 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:34 crc kubenswrapper[4884]: I1128 17:30:34.215293 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-pxx8h"] Nov 28 17:30:34 crc kubenswrapper[4884]: I1128 17:30:34.315085 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" event={"ID":"d303124f-49ec-447f-b8df-ff946aa08d58","Type":"ContainerStarted","Data":"975a6a2da217239487359bdc09d5cf42f1cf83946e65043e447b4b8ff1c7c356"} Nov 28 17:30:35 crc kubenswrapper[4884]: I1128 17:30:35.336533 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" event={"ID":"d303124f-49ec-447f-b8df-ff946aa08d58","Type":"ContainerStarted","Data":"54249441c77ea397b6bf5a75bbae48ed553cf2b413680de954ca3984ce5699cc"} Nov 28 17:30:35 crc kubenswrapper[4884]: I1128 17:30:35.370739 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" podStartSLOduration=1.732298449 podStartE2EDuration="2.370724476s" podCreationTimestamp="2025-11-28 17:30:33 +0000 UTC" firstStartedPulling="2025-11-28 17:30:34.221571872 +0000 UTC m=+7873.784355673" lastFinishedPulling="2025-11-28 17:30:34.859997899 +0000 UTC m=+7874.422781700" observedRunningTime="2025-11-28 17:30:35.368262066 +0000 UTC m=+7874.931045867" watchObservedRunningTime="2025-11-28 17:30:35.370724476 +0000 UTC m=+7874.933508277" Nov 28 17:30:37 crc kubenswrapper[4884]: I1128 17:30:37.509571 4884 scope.go:117] "RemoveContainer" containerID="602ed84b7660f0e227c280cbf10d5abbf30ef8a4a49d2d4388fc84bf767775df" Nov 28 17:30:43 crc kubenswrapper[4884]: I1128 17:30:43.443240 4884 generic.go:334] "Generic (PLEG): container finished" podID="d303124f-49ec-447f-b8df-ff946aa08d58" containerID="54249441c77ea397b6bf5a75bbae48ed553cf2b413680de954ca3984ce5699cc" exitCode=0 Nov 28 17:30:43 crc kubenswrapper[4884]: I1128 17:30:43.443338 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" event={"ID":"d303124f-49ec-447f-b8df-ff946aa08d58","Type":"ContainerDied","Data":"54249441c77ea397b6bf5a75bbae48ed553cf2b413680de954ca3984ce5699cc"} Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.048247 4884 util.go:48] "No ready sandbox for pod can be found. 
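Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-pxx8h"

The pod_startup_latency_tracker entry above for run-os-openstack-openstack-cell1-pxx8h is internally consistent: podStartSLOduration is podStartE2EDuration minus the image-pull window, i.e. 2.370724476s - (17:30:34.859997899 - 17:30:34.221571872) = 2.370724476s - 0.638426027s = 1.732298449s, exactly the logged value. A small Go check with the timestamps hard-coded from that entry:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "15:04:05.000000000"
        first, _ := time.Parse(layout, "17:30:34.221571872") // firstStartedPulling
        last, _ := time.Parse(layout, "17:30:34.859997899")  // lastFinishedPulling
        pull := last.Sub(first)             // image-pull window
        e2e := 2370724476 * time.Nanosecond // podStartE2EDuration from the log
        slo := e2e - pull                   // startup time excluding the pull
        fmt.Println(pull, slo)              // prints: 638.426027ms 1.732298449s
    }
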
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.147132 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ceph\") pod \"d303124f-49ec-447f-b8df-ff946aa08d58\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") "
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.147202 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4n46b\" (UniqueName: \"kubernetes.io/projected/d303124f-49ec-447f-b8df-ff946aa08d58-kube-api-access-4n46b\") pod \"d303124f-49ec-447f-b8df-ff946aa08d58\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") "
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.147305 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-inventory\") pod \"d303124f-49ec-447f-b8df-ff946aa08d58\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") "
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.147348 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ssh-key\") pod \"d303124f-49ec-447f-b8df-ff946aa08d58\" (UID: \"d303124f-49ec-447f-b8df-ff946aa08d58\") "
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.152426 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d303124f-49ec-447f-b8df-ff946aa08d58-kube-api-access-4n46b" (OuterVolumeSpecName: "kube-api-access-4n46b") pod "d303124f-49ec-447f-b8df-ff946aa08d58" (UID: "d303124f-49ec-447f-b8df-ff946aa08d58"). InnerVolumeSpecName "kube-api-access-4n46b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.154449 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ceph" (OuterVolumeSpecName: "ceph") pod "d303124f-49ec-447f-b8df-ff946aa08d58" (UID: "d303124f-49ec-447f-b8df-ff946aa08d58"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.178497 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d303124f-49ec-447f-b8df-ff946aa08d58" (UID: "d303124f-49ec-447f-b8df-ff946aa08d58"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.180479 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-inventory" (OuterVolumeSpecName: "inventory") pod "d303124f-49ec-447f-b8df-ff946aa08d58" (UID: "d303124f-49ec-447f-b8df-ff946aa08d58"). InnerVolumeSpecName "inventory".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.249982 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.250025 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4n46b\" (UniqueName: \"kubernetes.io/projected/d303124f-49ec-447f-b8df-ff946aa08d58-kube-api-access-4n46b\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.250039 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.250051 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d303124f-49ec-447f-b8df-ff946aa08d58-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.464395 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" event={"ID":"d303124f-49ec-447f-b8df-ff946aa08d58","Type":"ContainerDied","Data":"975a6a2da217239487359bdc09d5cf42f1cf83946e65043e447b4b8ff1c7c356"} Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.464434 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="975a6a2da217239487359bdc09d5cf42f1cf83946e65043e447b4b8ff1c7c356" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.464438 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-pxx8h" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.542257 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-m9qqn"] Nov 28 17:30:45 crc kubenswrapper[4884]: E1128 17:30:45.542749 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d303124f-49ec-447f-b8df-ff946aa08d58" containerName="run-os-openstack-openstack-cell1" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.542764 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d303124f-49ec-447f-b8df-ff946aa08d58" containerName="run-os-openstack-openstack-cell1" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.542962 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d303124f-49ec-447f-b8df-ff946aa08d58" containerName="run-os-openstack-openstack-cell1" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.543773 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.545991 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.546209 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.546328 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.546658 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.565393 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-m9qqn"] Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.660241 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-inventory\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.660490 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ceph\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.660753 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.660957 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7vlv\" (UniqueName: \"kubernetes.io/projected/d902a353-422e-45fc-90ca-6f46279c8954-kube-api-access-m7vlv\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.762619 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-inventory\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.762950 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ceph\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.763101 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.763264 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7vlv\" (UniqueName: \"kubernetes.io/projected/d902a353-422e-45fc-90ca-6f46279c8954-kube-api-access-m7vlv\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.768890 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-inventory\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.769146 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.769307 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ceph\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.781760 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7vlv\" (UniqueName: \"kubernetes.io/projected/d902a353-422e-45fc-90ca-6f46279c8954-kube-api-access-m7vlv\") pod \"reboot-os-openstack-openstack-cell1-m9qqn\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:45 crc kubenswrapper[4884]: I1128 17:30:45.868648 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:30:46 crc kubenswrapper[4884]: I1128 17:30:46.415531 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-m9qqn"] Nov 28 17:30:46 crc kubenswrapper[4884]: I1128 17:30:46.474922 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" event={"ID":"d902a353-422e-45fc-90ca-6f46279c8954","Type":"ContainerStarted","Data":"6dba44b3d50730b07a53eb49cb9217fab421e7e655e14e2ef2adeb24697aee50"} Nov 28 17:30:47 crc kubenswrapper[4884]: I1128 17:30:47.486689 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" event={"ID":"d902a353-422e-45fc-90ca-6f46279c8954","Type":"ContainerStarted","Data":"594e320901c308e70b13282d253022686c2990be0b6cb277b11954cb81f9a3e7"} Nov 28 17:30:47 crc kubenswrapper[4884]: I1128 17:30:47.527815 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" podStartSLOduration=1.949602793 podStartE2EDuration="2.527791979s" podCreationTimestamp="2025-11-28 17:30:45 +0000 UTC" firstStartedPulling="2025-11-28 17:30:46.434797176 +0000 UTC m=+7885.997580977" lastFinishedPulling="2025-11-28 17:30:47.012986362 +0000 UTC m=+7886.575770163" observedRunningTime="2025-11-28 17:30:47.513670992 +0000 UTC m=+7887.076454813" watchObservedRunningTime="2025-11-28 17:30:47.527791979 +0000 UTC m=+7887.090575780" Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.243679 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.244325 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.244387 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.245460 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.245537 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" gracePeriod=600 Nov 28 17:30:51 crc kubenswrapper[4884]: E1128 17:30:51.372626 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.531203 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" exitCode=0 Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.531246 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53"} Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.531285 4884 scope.go:117] "RemoveContainer" containerID="030beb3bce746039696177e34d1f5f166fcd801c3a5570e3e24b4dd09a6b5c88" Nov 28 17:30:51 crc kubenswrapper[4884]: I1128 17:30:51.532026 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:30:51 crc kubenswrapper[4884]: E1128 17:30:51.532421 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:31:03 crc kubenswrapper[4884]: I1128 17:31:03.643775 4884 generic.go:334] "Generic (PLEG): container finished" podID="d902a353-422e-45fc-90ca-6f46279c8954" containerID="594e320901c308e70b13282d253022686c2990be0b6cb277b11954cb81f9a3e7" exitCode=0 Nov 28 17:31:03 crc kubenswrapper[4884]: I1128 17:31:03.643867 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" event={"ID":"d902a353-422e-45fc-90ca-6f46279c8954","Type":"ContainerDied","Data":"594e320901c308e70b13282d253022686c2990be0b6cb277b11954cb81f9a3e7"} Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.181048 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.289636 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ssh-key\") pod \"d902a353-422e-45fc-90ca-6f46279c8954\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.289801 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7vlv\" (UniqueName: \"kubernetes.io/projected/d902a353-422e-45fc-90ca-6f46279c8954-kube-api-access-m7vlv\") pod \"d902a353-422e-45fc-90ca-6f46279c8954\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.289838 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-inventory\") pod \"d902a353-422e-45fc-90ca-6f46279c8954\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.289956 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ceph\") pod \"d902a353-422e-45fc-90ca-6f46279c8954\" (UID: \"d902a353-422e-45fc-90ca-6f46279c8954\") " Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.298817 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ceph" (OuterVolumeSpecName: "ceph") pod "d902a353-422e-45fc-90ca-6f46279c8954" (UID: "d902a353-422e-45fc-90ca-6f46279c8954"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.299451 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d902a353-422e-45fc-90ca-6f46279c8954-kube-api-access-m7vlv" (OuterVolumeSpecName: "kube-api-access-m7vlv") pod "d902a353-422e-45fc-90ca-6f46279c8954" (UID: "d902a353-422e-45fc-90ca-6f46279c8954"). InnerVolumeSpecName "kube-api-access-m7vlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.322444 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d902a353-422e-45fc-90ca-6f46279c8954" (UID: "d902a353-422e-45fc-90ca-6f46279c8954"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.323482 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-inventory" (OuterVolumeSpecName: "inventory") pod "d902a353-422e-45fc-90ca-6f46279c8954" (UID: "d902a353-422e-45fc-90ca-6f46279c8954"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.394903 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.394939 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.394949 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d902a353-422e-45fc-90ca-6f46279c8954-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.394959 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7vlv\" (UniqueName: \"kubernetes.io/projected/d902a353-422e-45fc-90ca-6f46279c8954-kube-api-access-m7vlv\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.664669 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" event={"ID":"d902a353-422e-45fc-90ca-6f46279c8954","Type":"ContainerDied","Data":"6dba44b3d50730b07a53eb49cb9217fab421e7e655e14e2ef2adeb24697aee50"} Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.666348 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6dba44b3d50730b07a53eb49cb9217fab421e7e655e14e2ef2adeb24697aee50" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.664710 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-m9qqn" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.687999 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:31:05 crc kubenswrapper[4884]: E1128 17:31:05.690165 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.823791 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-z44rr"] Nov 28 17:31:05 crc kubenswrapper[4884]: E1128 17:31:05.824382 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d902a353-422e-45fc-90ca-6f46279c8954" containerName="reboot-os-openstack-openstack-cell1" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.824410 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d902a353-422e-45fc-90ca-6f46279c8954" containerName="reboot-os-openstack-openstack-cell1" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.824746 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d902a353-422e-45fc-90ca-6f46279c8954" containerName="reboot-os-openstack-openstack-cell1" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.826268 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.832578 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.832653 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.833000 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.833144 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.843945 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-z44rr"] Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.904632 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.904923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905052 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905134 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905221 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905255 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ssh-key\") pod 
\"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905294 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbt6n\" (UniqueName: \"kubernetes.io/projected/fd73d060-ec7b-4d2e-9811-3a6495f53a42-kube-api-access-gbt6n\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905538 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905604 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-inventory\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905680 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ceph\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905731 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:05 crc kubenswrapper[4884]: I1128 17:31:05.905906 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008084 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008287 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008382 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008441 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008531 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ssh-key\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008562 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbt6n\" (UniqueName: \"kubernetes.io/projected/fd73d060-ec7b-4d2e-9811-3a6495f53a42-kube-api-access-gbt6n\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008647 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-inventory\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.008726 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ceph\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.014977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.015248 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.015391 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ceph\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.015580 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.015583 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.016655 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-inventory\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " 
pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.018287 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.018488 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ssh-key\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.018802 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.028242 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.031164 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.038634 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbt6n\" (UniqueName: \"kubernetes.io/projected/fd73d060-ec7b-4d2e-9811-3a6495f53a42-kube-api-access-gbt6n\") pod \"install-certs-openstack-openstack-cell1-z44rr\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.161462 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:06 crc kubenswrapper[4884]: I1128 17:31:06.760937 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-z44rr"] Nov 28 17:31:07 crc kubenswrapper[4884]: I1128 17:31:07.686982 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" event={"ID":"fd73d060-ec7b-4d2e-9811-3a6495f53a42","Type":"ContainerStarted","Data":"d413770071b5d914bee37cf5ba48acce8038d856ac7741beae6dc959de1add6e"} Nov 28 17:31:07 crc kubenswrapper[4884]: I1128 17:31:07.687421 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" event={"ID":"fd73d060-ec7b-4d2e-9811-3a6495f53a42","Type":"ContainerStarted","Data":"4a337d3993e978fa0dac933a2019cf3c6b6cb447b04a55966a719fa0320e1200"} Nov 28 17:31:07 crc kubenswrapper[4884]: I1128 17:31:07.710702 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" podStartSLOduration=2.251938139 podStartE2EDuration="2.710663378s" podCreationTimestamp="2025-11-28 17:31:05 +0000 UTC" firstStartedPulling="2025-11-28 17:31:06.766986856 +0000 UTC m=+7906.329770657" lastFinishedPulling="2025-11-28 17:31:07.225712095 +0000 UTC m=+7906.788495896" observedRunningTime="2025-11-28 17:31:07.706165778 +0000 UTC m=+7907.268949579" watchObservedRunningTime="2025-11-28 17:31:07.710663378 +0000 UTC m=+7907.273447179" Nov 28 17:31:17 crc kubenswrapper[4884]: I1128 17:31:17.690026 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:31:17 crc kubenswrapper[4884]: E1128 17:31:17.691213 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:31:25 crc kubenswrapper[4884]: I1128 17:31:25.876020 4884 generic.go:334] "Generic (PLEG): container finished" podID="fd73d060-ec7b-4d2e-9811-3a6495f53a42" containerID="d413770071b5d914bee37cf5ba48acce8038d856ac7741beae6dc959de1add6e" exitCode=0 Nov 28 17:31:25 crc kubenswrapper[4884]: I1128 17:31:25.876127 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" event={"ID":"fd73d060-ec7b-4d2e-9811-3a6495f53a42","Type":"ContainerDied","Data":"d413770071b5d914bee37cf5ba48acce8038d856ac7741beae6dc959de1add6e"} Nov 28 17:31:25 crc kubenswrapper[4884]: E1128 17:31:25.936220 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd73d060_ec7b_4d2e_9811_3a6495f53a42.slice/crio-conmon-d413770071b5d914bee37cf5ba48acce8038d856ac7741beae6dc959de1add6e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd73d060_ec7b_4d2e_9811_3a6495f53a42.slice/crio-d413770071b5d914bee37cf5ba48acce8038d856ac7741beae6dc959de1add6e.scope\": RecentStats: unable to find data in memory cache]" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 
17:31:27.337983 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.445719 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-sriov-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.445928 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-telemetry-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.446310 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-metadata-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.446386 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ceph\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.446586 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-nova-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.446816 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-libvirt-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.446925 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-inventory\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.446957 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbt6n\" (UniqueName: \"kubernetes.io/projected/fd73d060-ec7b-4d2e-9811-3a6495f53a42-kube-api-access-gbt6n\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.447002 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-dhcp-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.447073 
4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-bootstrap-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.447200 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ssh-key\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.447242 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ovn-combined-ca-bundle\") pod \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\" (UID: \"fd73d060-ec7b-4d2e-9811-3a6495f53a42\") " Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.451943 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.452188 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.452908 4884 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.452930 4884 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.454558 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.454620 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.454812 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ceph" (OuterVolumeSpecName: "ceph") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.455187 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd73d060-ec7b-4d2e-9811-3a6495f53a42-kube-api-access-gbt6n" (OuterVolumeSpecName: "kube-api-access-gbt6n") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "kube-api-access-gbt6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.456058 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.456430 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.457280 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.467586 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.481308 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-inventory" (OuterVolumeSpecName: "inventory") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.495551 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fd73d060-ec7b-4d2e-9811-3a6495f53a42" (UID: "fd73d060-ec7b-4d2e-9811-3a6495f53a42"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555047 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555112 4884 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555128 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555142 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555154 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555167 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555180 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555192 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555203 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fd73d060-ec7b-4d2e-9811-3a6495f53a42-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.555218 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbt6n\" (UniqueName: \"kubernetes.io/projected/fd73d060-ec7b-4d2e-9811-3a6495f53a42-kube-api-access-gbt6n\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.896619 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" 
event={"ID":"fd73d060-ec7b-4d2e-9811-3a6495f53a42","Type":"ContainerDied","Data":"4a337d3993e978fa0dac933a2019cf3c6b6cb447b04a55966a719fa0320e1200"} Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.896678 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a337d3993e978fa0dac933a2019cf3c6b6cb447b04a55966a719fa0320e1200" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.896706 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-z44rr" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.995447 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-mntbr"] Nov 28 17:31:27 crc kubenswrapper[4884]: E1128 17:31:27.995992 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd73d060-ec7b-4d2e-9811-3a6495f53a42" containerName="install-certs-openstack-openstack-cell1" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.996015 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd73d060-ec7b-4d2e-9811-3a6495f53a42" containerName="install-certs-openstack-openstack-cell1" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.996331 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd73d060-ec7b-4d2e-9811-3a6495f53a42" containerName="install-certs-openstack-openstack-cell1" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.997324 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.999327 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:31:27 crc kubenswrapper[4884]: I1128 17:31:27.999614 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.001172 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.001427 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.009743 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-mntbr"] Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.065868 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.066103 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-inventory\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.066526 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxftn\" (UniqueName: 
\"kubernetes.io/projected/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-kube-api-access-rxftn\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.066787 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ceph\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.168777 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxftn\" (UniqueName: \"kubernetes.io/projected/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-kube-api-access-rxftn\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.169281 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ceph\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.169412 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.169534 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-inventory\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.174153 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-inventory\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.174185 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ceph\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.175198 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.185121 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxftn\" (UniqueName: \"kubernetes.io/projected/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-kube-api-access-rxftn\") pod \"ceph-client-openstack-openstack-cell1-mntbr\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.327255 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.886728 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-mntbr"] Nov 28 17:31:28 crc kubenswrapper[4884]: I1128 17:31:28.908843 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" event={"ID":"90ffd366-f202-4fd6-a806-6a7ee4d2a22a","Type":"ContainerStarted","Data":"68c98f3e0b8934f04b3138a38fa34ca97816c8192daa697bc9f8dc63efe891fc"} Nov 28 17:31:29 crc kubenswrapper[4884]: I1128 17:31:29.922290 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" event={"ID":"90ffd366-f202-4fd6-a806-6a7ee4d2a22a","Type":"ContainerStarted","Data":"fe8be3133aae72f68bc8242431ced647922c0b4bd1fd79c7110a80b8e878f569"} Nov 28 17:31:29 crc kubenswrapper[4884]: I1128 17:31:29.946070 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" podStartSLOduration=2.471799714 podStartE2EDuration="2.946034115s" podCreationTimestamp="2025-11-28 17:31:27 +0000 UTC" firstStartedPulling="2025-11-28 17:31:28.888742129 +0000 UTC m=+7928.451525930" lastFinishedPulling="2025-11-28 17:31:29.36297653 +0000 UTC m=+7928.925760331" observedRunningTime="2025-11-28 17:31:29.944025506 +0000 UTC m=+7929.506809337" watchObservedRunningTime="2025-11-28 17:31:29.946034115 +0000 UTC m=+7929.508817916" Nov 28 17:31:32 crc kubenswrapper[4884]: I1128 17:31:32.688817 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:31:32 crc kubenswrapper[4884]: E1128 17:31:32.689415 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:31:34 crc kubenswrapper[4884]: I1128 17:31:34.971970 4884 generic.go:334] "Generic (PLEG): container finished" podID="90ffd366-f202-4fd6-a806-6a7ee4d2a22a" containerID="fe8be3133aae72f68bc8242431ced647922c0b4bd1fd79c7110a80b8e878f569" exitCode=0 Nov 28 17:31:34 crc kubenswrapper[4884]: I1128 17:31:34.972181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" event={"ID":"90ffd366-f202-4fd6-a806-6a7ee4d2a22a","Type":"ContainerDied","Data":"fe8be3133aae72f68bc8242431ced647922c0b4bd1fd79c7110a80b8e878f569"} Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.423331 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.571834 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-inventory\") pod \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.571935 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ssh-key\") pod \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.571985 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ceph\") pod \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.572125 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxftn\" (UniqueName: \"kubernetes.io/projected/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-kube-api-access-rxftn\") pod \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\" (UID: \"90ffd366-f202-4fd6-a806-6a7ee4d2a22a\") " Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.588488 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-kube-api-access-rxftn" (OuterVolumeSpecName: "kube-api-access-rxftn") pod "90ffd366-f202-4fd6-a806-6a7ee4d2a22a" (UID: "90ffd366-f202-4fd6-a806-6a7ee4d2a22a"). InnerVolumeSpecName "kube-api-access-rxftn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.589317 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ceph" (OuterVolumeSpecName: "ceph") pod "90ffd366-f202-4fd6-a806-6a7ee4d2a22a" (UID: "90ffd366-f202-4fd6-a806-6a7ee4d2a22a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.610232 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-inventory" (OuterVolumeSpecName: "inventory") pod "90ffd366-f202-4fd6-a806-6a7ee4d2a22a" (UID: "90ffd366-f202-4fd6-a806-6a7ee4d2a22a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.610529 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "90ffd366-f202-4fd6-a806-6a7ee4d2a22a" (UID: "90ffd366-f202-4fd6-a806-6a7ee4d2a22a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.675437 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.675466 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.675478 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.675487 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxftn\" (UniqueName: \"kubernetes.io/projected/90ffd366-f202-4fd6-a806-6a7ee4d2a22a-kube-api-access-rxftn\") on node \"crc\" DevicePath \"\"" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.994632 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" event={"ID":"90ffd366-f202-4fd6-a806-6a7ee4d2a22a","Type":"ContainerDied","Data":"68c98f3e0b8934f04b3138a38fa34ca97816c8192daa697bc9f8dc63efe891fc"} Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.994956 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68c98f3e0b8934f04b3138a38fa34ca97816c8192daa697bc9f8dc63efe891fc" Nov 28 17:31:36 crc kubenswrapper[4884]: I1128 17:31:36.994701 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-mntbr" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.075628 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-5gr76"] Nov 28 17:31:37 crc kubenswrapper[4884]: E1128 17:31:37.076162 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ffd366-f202-4fd6-a806-6a7ee4d2a22a" containerName="ceph-client-openstack-openstack-cell1" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.076185 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ffd366-f202-4fd6-a806-6a7ee4d2a22a" containerName="ceph-client-openstack-openstack-cell1" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.076468 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="90ffd366-f202-4fd6-a806-6a7ee4d2a22a" containerName="ceph-client-openstack-openstack-cell1" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.077454 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.086890 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g5xf\" (UniqueName: \"kubernetes.io/projected/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-kube-api-access-7g5xf\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.086925 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ceph\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.086957 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.086999 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.087072 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ssh-key\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.087129 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-inventory\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.088934 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.089206 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.089698 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.089757 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.090050 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.137132 4884 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-5gr76"] Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.192699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ssh-key\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.192799 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-inventory\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.192846 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g5xf\" (UniqueName: \"kubernetes.io/projected/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-kube-api-access-7g5xf\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.192874 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ceph\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.192906 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.192951 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.194236 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.198778 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ceph\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.198791 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovn-combined-ca-bundle\") pod 
\"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.201967 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-inventory\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.202725 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ssh-key\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.208620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g5xf\" (UniqueName: \"kubernetes.io/projected/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-kube-api-access-7g5xf\") pod \"ovn-openstack-openstack-cell1-5gr76\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:37 crc kubenswrapper[4884]: I1128 17:31:37.457554 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:31:38 crc kubenswrapper[4884]: I1128 17:31:38.036819 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-5gr76"] Nov 28 17:31:39 crc kubenswrapper[4884]: I1128 17:31:39.013424 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-5gr76" event={"ID":"b3a9fdf6-1044-49c8-ac27-e5d424219c3f","Type":"ContainerStarted","Data":"07189682527a45ae7e3d5d7807a0a30231d8fcfc7089409a9be436940e957c7d"} Nov 28 17:31:39 crc kubenswrapper[4884]: I1128 17:31:39.013740 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-5gr76" event={"ID":"b3a9fdf6-1044-49c8-ac27-e5d424219c3f","Type":"ContainerStarted","Data":"599eb299ad3341ffa51e307f148d3e232bfdb4ed36feec643afdf701efd6564a"} Nov 28 17:31:39 crc kubenswrapper[4884]: I1128 17:31:39.033291 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-5gr76" podStartSLOduration=1.3850283509999999 podStartE2EDuration="2.033272389s" podCreationTimestamp="2025-11-28 17:31:37 +0000 UTC" firstStartedPulling="2025-11-28 17:31:38.042034078 +0000 UTC m=+7937.604817879" lastFinishedPulling="2025-11-28 17:31:38.690278116 +0000 UTC m=+7938.253061917" observedRunningTime="2025-11-28 17:31:39.026623717 +0000 UTC m=+7938.589407518" watchObservedRunningTime="2025-11-28 17:31:39.033272389 +0000 UTC m=+7938.596056190" Nov 28 17:31:46 crc kubenswrapper[4884]: I1128 17:31:46.689181 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:31:46 crc kubenswrapper[4884]: E1128 17:31:46.689960 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:32:01 crc kubenswrapper[4884]: I1128 17:32:01.689551 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:32:01 crc kubenswrapper[4884]: E1128 17:32:01.690554 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:32:15 crc kubenswrapper[4884]: I1128 17:32:15.688935 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:32:15 crc kubenswrapper[4884]: E1128 17:32:15.689744 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:32:26 crc kubenswrapper[4884]: I1128 17:32:26.689381 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:32:26 crc kubenswrapper[4884]: E1128 17:32:26.691801 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:32:37 crc kubenswrapper[4884]: I1128 17:32:37.688914 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:32:37 crc kubenswrapper[4884]: E1128 17:32:37.689748 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:32:47 crc kubenswrapper[4884]: I1128 17:32:47.800411 4884 generic.go:334] "Generic (PLEG): container finished" podID="b3a9fdf6-1044-49c8-ac27-e5d424219c3f" containerID="07189682527a45ae7e3d5d7807a0a30231d8fcfc7089409a9be436940e957c7d" exitCode=0 Nov 28 17:32:47 crc kubenswrapper[4884]: I1128 17:32:47.800577 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-5gr76" event={"ID":"b3a9fdf6-1044-49c8-ac27-e5d424219c3f","Type":"ContainerDied","Data":"07189682527a45ae7e3d5d7807a0a30231d8fcfc7089409a9be436940e957c7d"} Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.257168 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.366188 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ssh-key\") pod \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.366233 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovn-combined-ca-bundle\") pod \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.366277 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-inventory\") pod \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.366302 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7g5xf\" (UniqueName: \"kubernetes.io/projected/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-kube-api-access-7g5xf\") pod \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.366352 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ceph\") pod \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.366575 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovncontroller-config-0\") pod \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\" (UID: \"b3a9fdf6-1044-49c8-ac27-e5d424219c3f\") " Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.373677 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "b3a9fdf6-1044-49c8-ac27-e5d424219c3f" (UID: "b3a9fdf6-1044-49c8-ac27-e5d424219c3f"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.373723 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ceph" (OuterVolumeSpecName: "ceph") pod "b3a9fdf6-1044-49c8-ac27-e5d424219c3f" (UID: "b3a9fdf6-1044-49c8-ac27-e5d424219c3f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.373772 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-kube-api-access-7g5xf" (OuterVolumeSpecName: "kube-api-access-7g5xf") pod "b3a9fdf6-1044-49c8-ac27-e5d424219c3f" (UID: "b3a9fdf6-1044-49c8-ac27-e5d424219c3f"). InnerVolumeSpecName "kube-api-access-7g5xf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.400483 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-inventory" (OuterVolumeSpecName: "inventory") pod "b3a9fdf6-1044-49c8-ac27-e5d424219c3f" (UID: "b3a9fdf6-1044-49c8-ac27-e5d424219c3f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.401417 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "b3a9fdf6-1044-49c8-ac27-e5d424219c3f" (UID: "b3a9fdf6-1044-49c8-ac27-e5d424219c3f"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.418759 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b3a9fdf6-1044-49c8-ac27-e5d424219c3f" (UID: "b3a9fdf6-1044-49c8-ac27-e5d424219c3f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.469815 4884 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.469846 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.469856 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.469864 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.469873 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7g5xf\" (UniqueName: \"kubernetes.io/projected/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-kube-api-access-7g5xf\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.469880 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b3a9fdf6-1044-49c8-ac27-e5d424219c3f-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.822168 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-5gr76" event={"ID":"b3a9fdf6-1044-49c8-ac27-e5d424219c3f","Type":"ContainerDied","Data":"599eb299ad3341ffa51e307f148d3e232bfdb4ed36feec643afdf701efd6564a"} Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.822210 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="599eb299ad3341ffa51e307f148d3e232bfdb4ed36feec643afdf701efd6564a" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 
17:32:49.822248 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-5gr76" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.914813 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-mmfl4"] Nov 28 17:32:49 crc kubenswrapper[4884]: E1128 17:32:49.915717 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3a9fdf6-1044-49c8-ac27-e5d424219c3f" containerName="ovn-openstack-openstack-cell1" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.915740 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3a9fdf6-1044-49c8-ac27-e5d424219c3f" containerName="ovn-openstack-openstack-cell1" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.915997 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3a9fdf6-1044-49c8-ac27-e5d424219c3f" containerName="ovn-openstack-openstack-cell1" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.917075 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.921768 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.921768 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.921929 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.922035 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.922057 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.922187 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.926044 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-mmfl4"] Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980450 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980532 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980571 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980750 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980858 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dwkk\" (UniqueName: \"kubernetes.io/projected/0a900132-e2fc-4366-97ca-67ca52ce4ee6-kube-api-access-7dwkk\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:49 crc kubenswrapper[4884]: I1128 17:32:49.980911 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082345 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082393 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082451 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dwkk\" (UniqueName: \"kubernetes.io/projected/0a900132-e2fc-4366-97ca-67ca52ce4ee6-kube-api-access-7dwkk\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082483 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" 
(UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082510 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082558 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.082588 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.087430 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.087443 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.088574 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.088702 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.089652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.090816 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.099759 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dwkk\" (UniqueName: \"kubernetes.io/projected/0a900132-e2fc-4366-97ca-67ca52ce4ee6-kube-api-access-7dwkk\") pod \"neutron-metadata-openstack-openstack-cell1-mmfl4\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.242219 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.792297 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-mmfl4"] Nov 28 17:32:50 crc kubenswrapper[4884]: I1128 17:32:50.838447 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" event={"ID":"0a900132-e2fc-4366-97ca-67ca52ce4ee6","Type":"ContainerStarted","Data":"83676f1fd900a6637423a848d63fcfdfc88f50223226c93b73a8435126001fef"} Nov 28 17:32:51 crc kubenswrapper[4884]: I1128 17:32:51.851833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" event={"ID":"0a900132-e2fc-4366-97ca-67ca52ce4ee6","Type":"ContainerStarted","Data":"a73f4f3c87572d2686f09360b3a46cded61896db135427e785df790818bc0ede"} Nov 28 17:32:51 crc kubenswrapper[4884]: I1128 17:32:51.878738 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" podStartSLOduration=2.116488262 podStartE2EDuration="2.878715012s" podCreationTimestamp="2025-11-28 17:32:49 +0000 UTC" firstStartedPulling="2025-11-28 17:32:50.79797368 +0000 UTC m=+8010.360757481" lastFinishedPulling="2025-11-28 17:32:51.56020043 +0000 UTC m=+8011.122984231" observedRunningTime="2025-11-28 17:32:51.871896974 +0000 UTC m=+8011.434680805" watchObservedRunningTime="2025-11-28 17:32:51.878715012 +0000 UTC m=+8011.441498813" Nov 28 17:32:52 crc kubenswrapper[4884]: I1128 17:32:52.688719 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:32:52 crc kubenswrapper[4884]: E1128 17:32:52.689018 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:32:55 crc kubenswrapper[4884]: 
I1128 17:32:55.504013 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2c5tp"] Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.508358 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.530854 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2c5tp"] Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.621750 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-catalog-content\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.622007 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhj6t\" (UniqueName: \"kubernetes.io/projected/d7eb971c-f423-45e2-b6f6-dab8f7e25817-kube-api-access-jhj6t\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.622235 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-utilities\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.724734 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-utilities\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.724878 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-catalog-content\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.724948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhj6t\" (UniqueName: \"kubernetes.io/projected/d7eb971c-f423-45e2-b6f6-dab8f7e25817-kube-api-access-jhj6t\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.725556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-utilities\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.725556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-catalog-content\") pod \"redhat-operators-2c5tp\" (UID: 
\"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.747842 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhj6t\" (UniqueName: \"kubernetes.io/projected/d7eb971c-f423-45e2-b6f6-dab8f7e25817-kube-api-access-jhj6t\") pod \"redhat-operators-2c5tp\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:55 crc kubenswrapper[4884]: I1128 17:32:55.878333 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:32:56 crc kubenswrapper[4884]: I1128 17:32:56.361056 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2c5tp"] Nov 28 17:32:56 crc kubenswrapper[4884]: W1128 17:32:56.370272 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7eb971c_f423_45e2_b6f6_dab8f7e25817.slice/crio-fedab2e48193ad4d8c10954eacd762762856ef762d7965701ca674f7916ce201 WatchSource:0}: Error finding container fedab2e48193ad4d8c10954eacd762762856ef762d7965701ca674f7916ce201: Status 404 returned error can't find the container with id fedab2e48193ad4d8c10954eacd762762856ef762d7965701ca674f7916ce201 Nov 28 17:32:56 crc kubenswrapper[4884]: I1128 17:32:56.914807 4884 generic.go:334] "Generic (PLEG): container finished" podID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerID="1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61" exitCode=0 Nov 28 17:32:56 crc kubenswrapper[4884]: I1128 17:32:56.914878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerDied","Data":"1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61"} Nov 28 17:32:56 crc kubenswrapper[4884]: I1128 17:32:56.915295 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerStarted","Data":"fedab2e48193ad4d8c10954eacd762762856ef762d7965701ca674f7916ce201"} Nov 28 17:32:58 crc kubenswrapper[4884]: I1128 17:32:58.934198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerStarted","Data":"46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef"} Nov 28 17:33:00 crc kubenswrapper[4884]: I1128 17:33:00.953730 4884 generic.go:334] "Generic (PLEG): container finished" podID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerID="46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef" exitCode=0 Nov 28 17:33:00 crc kubenswrapper[4884]: I1128 17:33:00.953801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerDied","Data":"46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef"} Nov 28 17:33:01 crc kubenswrapper[4884]: I1128 17:33:01.966036 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerStarted","Data":"3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b"} Nov 28 17:33:01 crc kubenswrapper[4884]: I1128 
17:33:01.992009 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2c5tp" podStartSLOduration=2.305610742 podStartE2EDuration="6.991988354s" podCreationTimestamp="2025-11-28 17:32:55 +0000 UTC" firstStartedPulling="2025-11-28 17:32:56.916671059 +0000 UTC m=+8016.479454860" lastFinishedPulling="2025-11-28 17:33:01.603048671 +0000 UTC m=+8021.165832472" observedRunningTime="2025-11-28 17:33:01.983538246 +0000 UTC m=+8021.546322047" watchObservedRunningTime="2025-11-28 17:33:01.991988354 +0000 UTC m=+8021.554772155" Nov 28 17:33:05 crc kubenswrapper[4884]: I1128 17:33:05.878683 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:33:05 crc kubenswrapper[4884]: I1128 17:33:05.878931 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:33:06 crc kubenswrapper[4884]: I1128 17:33:06.925802 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2c5tp" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="registry-server" probeResult="failure" output=< Nov 28 17:33:06 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 17:33:06 crc kubenswrapper[4884]: > Nov 28 17:33:07 crc kubenswrapper[4884]: I1128 17:33:07.689057 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:33:07 crc kubenswrapper[4884]: E1128 17:33:07.689917 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.008071 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-spt8x"] Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.011621 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.023193 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-spt8x"] Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.030488 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-catalog-content\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.030573 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnxtb\" (UniqueName: \"kubernetes.io/projected/01b55c66-9807-47cb-95c8-588afb706fd1-kube-api-access-rnxtb\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.030764 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-utilities\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.133000 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-utilities\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.133158 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-catalog-content\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.133203 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnxtb\" (UniqueName: \"kubernetes.io/projected/01b55c66-9807-47cb-95c8-588afb706fd1-kube-api-access-rnxtb\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.133618 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-utilities\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.133994 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-catalog-content\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.151754 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rnxtb\" (UniqueName: \"kubernetes.io/projected/01b55c66-9807-47cb-95c8-588afb706fd1-kube-api-access-rnxtb\") pod \"community-operators-spt8x\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.334806 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:14 crc kubenswrapper[4884]: W1128 17:33:14.847004 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01b55c66_9807_47cb_95c8_588afb706fd1.slice/crio-b0720467f67e833fadbfc343fed6d3d23956b7c0183a4e1d6a46b0d0271634bf WatchSource:0}: Error finding container b0720467f67e833fadbfc343fed6d3d23956b7c0183a4e1d6a46b0d0271634bf: Status 404 returned error can't find the container with id b0720467f67e833fadbfc343fed6d3d23956b7c0183a4e1d6a46b0d0271634bf Nov 28 17:33:14 crc kubenswrapper[4884]: I1128 17:33:14.849501 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-spt8x"] Nov 28 17:33:15 crc kubenswrapper[4884]: I1128 17:33:15.104596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerStarted","Data":"86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11"} Nov 28 17:33:15 crc kubenswrapper[4884]: I1128 17:33:15.104638 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerStarted","Data":"b0720467f67e833fadbfc343fed6d3d23956b7c0183a4e1d6a46b0d0271634bf"} Nov 28 17:33:15 crc kubenswrapper[4884]: I1128 17:33:15.934675 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:33:15 crc kubenswrapper[4884]: I1128 17:33:15.991611 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:33:16 crc kubenswrapper[4884]: I1128 17:33:16.122163 4884 generic.go:334] "Generic (PLEG): container finished" podID="01b55c66-9807-47cb-95c8-588afb706fd1" containerID="86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11" exitCode=0 Nov 28 17:33:16 crc kubenswrapper[4884]: I1128 17:33:16.122252 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerDied","Data":"86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11"} Nov 28 17:33:18 crc kubenswrapper[4884]: I1128 17:33:18.140285 4884 generic.go:334] "Generic (PLEG): container finished" podID="01b55c66-9807-47cb-95c8-588afb706fd1" containerID="3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c" exitCode=0 Nov 28 17:33:18 crc kubenswrapper[4884]: I1128 17:33:18.140350 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerDied","Data":"3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c"} Nov 28 17:33:19 crc kubenswrapper[4884]: I1128 17:33:19.154194 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerStarted","Data":"9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a"} Nov 28 17:33:19 crc kubenswrapper[4884]: I1128 17:33:19.176788 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-spt8x" podStartSLOduration=3.740042288 podStartE2EDuration="6.17677164s" podCreationTimestamp="2025-11-28 17:33:13 +0000 UTC" firstStartedPulling="2025-11-28 17:33:16.124849163 +0000 UTC m=+8035.687632954" lastFinishedPulling="2025-11-28 17:33:18.561578505 +0000 UTC m=+8038.124362306" observedRunningTime="2025-11-28 17:33:19.171171962 +0000 UTC m=+8038.733955773" watchObservedRunningTime="2025-11-28 17:33:19.17677164 +0000 UTC m=+8038.739555441" Nov 28 17:33:20 crc kubenswrapper[4884]: I1128 17:33:20.400841 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2c5tp"] Nov 28 17:33:20 crc kubenswrapper[4884]: I1128 17:33:20.401097 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2c5tp" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="registry-server" containerID="cri-o://3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b" gracePeriod=2 Nov 28 17:33:20 crc kubenswrapper[4884]: I1128 17:33:20.698975 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:33:20 crc kubenswrapper[4884]: E1128 17:33:20.699331 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:33:20 crc kubenswrapper[4884]: I1128 17:33:20.912960 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.102533 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-catalog-content\") pod \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.102783 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-utilities\") pod \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.103033 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhj6t\" (UniqueName: \"kubernetes.io/projected/d7eb971c-f423-45e2-b6f6-dab8f7e25817-kube-api-access-jhj6t\") pod \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\" (UID: \"d7eb971c-f423-45e2-b6f6-dab8f7e25817\") " Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.103817 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-utilities" (OuterVolumeSpecName: "utilities") pod "d7eb971c-f423-45e2-b6f6-dab8f7e25817" (UID: "d7eb971c-f423-45e2-b6f6-dab8f7e25817"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.108190 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7eb971c-f423-45e2-b6f6-dab8f7e25817-kube-api-access-jhj6t" (OuterVolumeSpecName: "kube-api-access-jhj6t") pod "d7eb971c-f423-45e2-b6f6-dab8f7e25817" (UID: "d7eb971c-f423-45e2-b6f6-dab8f7e25817"). InnerVolumeSpecName "kube-api-access-jhj6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.179244 4884 generic.go:334] "Generic (PLEG): container finished" podID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerID="3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b" exitCode=0 Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.179295 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerDied","Data":"3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b"} Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.179333 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2c5tp" event={"ID":"d7eb971c-f423-45e2-b6f6-dab8f7e25817","Type":"ContainerDied","Data":"fedab2e48193ad4d8c10954eacd762762856ef762d7965701ca674f7916ce201"} Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.179353 4884 scope.go:117] "RemoveContainer" containerID="3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.179354 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2c5tp" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.204929 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.204957 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhj6t\" (UniqueName: \"kubernetes.io/projected/d7eb971c-f423-45e2-b6f6-dab8f7e25817-kube-api-access-jhj6t\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.208901 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d7eb971c-f423-45e2-b6f6-dab8f7e25817" (UID: "d7eb971c-f423-45e2-b6f6-dab8f7e25817"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.213089 4884 scope.go:117] "RemoveContainer" containerID="46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.239662 4884 scope.go:117] "RemoveContainer" containerID="1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.295324 4884 scope.go:117] "RemoveContainer" containerID="3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b" Nov 28 17:33:21 crc kubenswrapper[4884]: E1128 17:33:21.295822 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b\": container with ID starting with 3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b not found: ID does not exist" containerID="3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.295858 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b"} err="failed to get container status \"3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b\": rpc error: code = NotFound desc = could not find container \"3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b\": container with ID starting with 3f396453826e9e5c8822ef63a8ed9084beb2a51cb72f389bfe933a6fbb7a140b not found: ID does not exist" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.295879 4884 scope.go:117] "RemoveContainer" containerID="46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef" Nov 28 17:33:21 crc kubenswrapper[4884]: E1128 17:33:21.296339 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef\": container with ID starting with 46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef not found: ID does not exist" containerID="46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.296361 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef"} err="failed to get 
container status \"46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef\": rpc error: code = NotFound desc = could not find container \"46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef\": container with ID starting with 46b3397c5964843f8acb731b9b6a81d371390cc28af416dfbfa8079a7d0fb3ef not found: ID does not exist" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.296376 4884 scope.go:117] "RemoveContainer" containerID="1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61" Nov 28 17:33:21 crc kubenswrapper[4884]: E1128 17:33:21.296711 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61\": container with ID starting with 1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61 not found: ID does not exist" containerID="1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.296740 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61"} err="failed to get container status \"1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61\": rpc error: code = NotFound desc = could not find container \"1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61\": container with ID starting with 1d6ec85d2a655cd9b01f2316c505585f85712c19df48c53a7d5db7eca72d6f61 not found: ID does not exist" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.307812 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d7eb971c-f423-45e2-b6f6-dab8f7e25817-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.530021 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2c5tp"] Nov 28 17:33:21 crc kubenswrapper[4884]: I1128 17:33:21.543383 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2c5tp"] Nov 28 17:33:22 crc kubenswrapper[4884]: I1128 17:33:22.701602 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" path="/var/lib/kubelet/pods/d7eb971c-f423-45e2-b6f6-dab8f7e25817/volumes" Nov 28 17:33:24 crc kubenswrapper[4884]: I1128 17:33:24.335428 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:24 crc kubenswrapper[4884]: I1128 17:33:24.336897 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:24 crc kubenswrapper[4884]: I1128 17:33:24.377397 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:25 crc kubenswrapper[4884]: I1128 17:33:25.272988 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:26 crc kubenswrapper[4884]: I1128 17:33:26.397885 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-spt8x"] Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.239525 4884 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/community-operators-spt8x" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="registry-server" containerID="cri-o://9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a" gracePeriod=2 Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.729784 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.850486 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-utilities\") pod \"01b55c66-9807-47cb-95c8-588afb706fd1\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.850582 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-catalog-content\") pod \"01b55c66-9807-47cb-95c8-588afb706fd1\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.850765 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnxtb\" (UniqueName: \"kubernetes.io/projected/01b55c66-9807-47cb-95c8-588afb706fd1-kube-api-access-rnxtb\") pod \"01b55c66-9807-47cb-95c8-588afb706fd1\" (UID: \"01b55c66-9807-47cb-95c8-588afb706fd1\") " Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.851555 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-utilities" (OuterVolumeSpecName: "utilities") pod "01b55c66-9807-47cb-95c8-588afb706fd1" (UID: "01b55c66-9807-47cb-95c8-588afb706fd1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.853633 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.858888 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01b55c66-9807-47cb-95c8-588afb706fd1-kube-api-access-rnxtb" (OuterVolumeSpecName: "kube-api-access-rnxtb") pod "01b55c66-9807-47cb-95c8-588afb706fd1" (UID: "01b55c66-9807-47cb-95c8-588afb706fd1"). InnerVolumeSpecName "kube-api-access-rnxtb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.907980 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "01b55c66-9807-47cb-95c8-588afb706fd1" (UID: "01b55c66-9807-47cb-95c8-588afb706fd1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.955493 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnxtb\" (UniqueName: \"kubernetes.io/projected/01b55c66-9807-47cb-95c8-588afb706fd1-kube-api-access-rnxtb\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:27 crc kubenswrapper[4884]: I1128 17:33:27.955533 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01b55c66-9807-47cb-95c8-588afb706fd1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.252071 4884 generic.go:334] "Generic (PLEG): container finished" podID="01b55c66-9807-47cb-95c8-588afb706fd1" containerID="9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a" exitCode=0 Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.252136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerDied","Data":"9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a"} Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.252162 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-spt8x" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.252174 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-spt8x" event={"ID":"01b55c66-9807-47cb-95c8-588afb706fd1","Type":"ContainerDied","Data":"b0720467f67e833fadbfc343fed6d3d23956b7c0183a4e1d6a46b0d0271634bf"} Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.252199 4884 scope.go:117] "RemoveContainer" containerID="9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.274069 4884 scope.go:117] "RemoveContainer" containerID="3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.296665 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-spt8x"] Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.313870 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-spt8x"] Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.316556 4884 scope.go:117] "RemoveContainer" containerID="86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.360109 4884 scope.go:117] "RemoveContainer" containerID="9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a" Nov 28 17:33:28 crc kubenswrapper[4884]: E1128 17:33:28.360814 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a\": container with ID starting with 9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a not found: ID does not exist" containerID="9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.360898 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a"} err="failed to get container status 
\"9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a\": rpc error: code = NotFound desc = could not find container \"9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a\": container with ID starting with 9a5f561ee7f3d43ee587365a5e80d4af8051ccec48ff4776e9578f82ae9ae45a not found: ID does not exist" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.360970 4884 scope.go:117] "RemoveContainer" containerID="3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c" Nov 28 17:33:28 crc kubenswrapper[4884]: E1128 17:33:28.361437 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c\": container with ID starting with 3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c not found: ID does not exist" containerID="3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.361471 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c"} err="failed to get container status \"3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c\": rpc error: code = NotFound desc = could not find container \"3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c\": container with ID starting with 3488f9c323ebd6875cd28dc6346c291b2272ab5f09b7875623fe6ddc173e183c not found: ID does not exist" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.361497 4884 scope.go:117] "RemoveContainer" containerID="86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11" Nov 28 17:33:28 crc kubenswrapper[4884]: E1128 17:33:28.361773 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11\": container with ID starting with 86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11 not found: ID does not exist" containerID="86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.361825 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11"} err="failed to get container status \"86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11\": rpc error: code = NotFound desc = could not find container \"86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11\": container with ID starting with 86aa6cf6a6dcd54159e76f3299513a44c1fd37b711b875a175c032655329ab11 not found: ID does not exist" Nov 28 17:33:28 crc kubenswrapper[4884]: I1128 17:33:28.713862 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" path="/var/lib/kubelet/pods/01b55c66-9807-47cb-95c8-588afb706fd1/volumes" Nov 28 17:33:33 crc kubenswrapper[4884]: I1128 17:33:33.688708 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:33:33 crc kubenswrapper[4884]: E1128 17:33:33.689605 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:33:45 crc kubenswrapper[4884]: I1128 17:33:45.688940 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:33:45 crc kubenswrapper[4884]: E1128 17:33:45.689820 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:33:47 crc kubenswrapper[4884]: I1128 17:33:47.462535 4884 generic.go:334] "Generic (PLEG): container finished" podID="0a900132-e2fc-4366-97ca-67ca52ce4ee6" containerID="a73f4f3c87572d2686f09360b3a46cded61896db135427e785df790818bc0ede" exitCode=0 Nov 28 17:33:47 crc kubenswrapper[4884]: I1128 17:33:47.462631 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" event={"ID":"0a900132-e2fc-4366-97ca-67ca52ce4ee6","Type":"ContainerDied","Data":"a73f4f3c87572d2686f09360b3a46cded61896db135427e785df790818bc0ede"} Nov 28 17:33:48 crc kubenswrapper[4884]: I1128 17:33:48.905616 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024444 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-ovn-metadata-agent-neutron-config-0\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024507 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-inventory\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024691 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-metadata-combined-ca-bundle\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024834 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ceph\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024918 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-nova-metadata-neutron-config-0\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 
17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024936 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ssh-key\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.024958 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dwkk\" (UniqueName: \"kubernetes.io/projected/0a900132-e2fc-4366-97ca-67ca52ce4ee6-kube-api-access-7dwkk\") pod \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\" (UID: \"0a900132-e2fc-4366-97ca-67ca52ce4ee6\") " Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.029931 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.030299 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ceph" (OuterVolumeSpecName: "ceph") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.032214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a900132-e2fc-4366-97ca-67ca52ce4ee6-kube-api-access-7dwkk" (OuterVolumeSpecName: "kube-api-access-7dwkk") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "kube-api-access-7dwkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.056808 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.058018 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-inventory" (OuterVolumeSpecName: "inventory") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.063301 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.067203 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "0a900132-e2fc-4366-97ca-67ca52ce4ee6" (UID: "0a900132-e2fc-4366-97ca-67ca52ce4ee6"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127182 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127216 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127227 4884 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127237 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127246 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dwkk\" (UniqueName: \"kubernetes.io/projected/0a900132-e2fc-4366-97ca-67ca52ce4ee6-kube-api-access-7dwkk\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127256 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.127267 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a900132-e2fc-4366-97ca-67ca52ce4ee6-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.482667 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" event={"ID":"0a900132-e2fc-4366-97ca-67ca52ce4ee6","Type":"ContainerDied","Data":"83676f1fd900a6637423a848d63fcfdfc88f50223226c93b73a8435126001fef"} Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.482961 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83676f1fd900a6637423a848d63fcfdfc88f50223226c93b73a8435126001fef" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.482730 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-mmfl4" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.581942 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-l46sp"] Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582407 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="registry-server" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582424 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="registry-server" Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582453 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a900132-e2fc-4366-97ca-67ca52ce4ee6" containerName="neutron-metadata-openstack-openstack-cell1" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582461 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a900132-e2fc-4366-97ca-67ca52ce4ee6" containerName="neutron-metadata-openstack-openstack-cell1" Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582470 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="extract-content" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582476 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="extract-content" Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582491 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="extract-utilities" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582497 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="extract-utilities" Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582519 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="extract-utilities" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582525 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="extract-utilities" Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582535 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="registry-server" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582541 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="registry-server" Nov 28 17:33:49 crc kubenswrapper[4884]: E1128 17:33:49.582549 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="extract-content" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582554 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="extract-content" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582738 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a900132-e2fc-4366-97ca-67ca52ce4ee6" containerName="neutron-metadata-openstack-openstack-cell1" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.582754 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="01b55c66-9807-47cb-95c8-588afb706fd1" containerName="registry-server" Nov 28 17:33:49 
crc kubenswrapper[4884]: I1128 17:33:49.582764 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7eb971c-f423-45e2-b6f6-dab8f7e25817" containerName="registry-server" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.583572 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.585604 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.585689 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.585843 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.586882 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.587628 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.599222 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-l46sp"] Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.637867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-inventory\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.637926 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.638050 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ch5j\" (UniqueName: \"kubernetes.io/projected/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-kube-api-access-5ch5j\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.638170 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ceph\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.638206 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ssh-key\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc 
kubenswrapper[4884]: I1128 17:33:49.638230 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.741471 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-inventory\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.741521 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.741557 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ch5j\" (UniqueName: \"kubernetes.io/projected/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-kube-api-access-5ch5j\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.741634 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ceph\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.741663 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ssh-key\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.741699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.748647 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.749042 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ssh-key\") pod 
\"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.749334 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ceph\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.751673 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-inventory\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.764310 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.764468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ch5j\" (UniqueName: \"kubernetes.io/projected/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-kube-api-access-5ch5j\") pod \"libvirt-openstack-openstack-cell1-l46sp\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:49 crc kubenswrapper[4884]: I1128 17:33:49.913398 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:33:50 crc kubenswrapper[4884]: I1128 17:33:50.452951 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-l46sp"] Nov 28 17:33:50 crc kubenswrapper[4884]: I1128 17:33:50.461630 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:33:50 crc kubenswrapper[4884]: I1128 17:33:50.493219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" event={"ID":"07c45a00-0b6d-44ae-bf00-566fa5d81f4e","Type":"ContainerStarted","Data":"f2b2fac80f4f79aa1e71ef44280a8c994332c334913ed0c6a1851f172a201a68"} Nov 28 17:33:51 crc kubenswrapper[4884]: I1128 17:33:51.508488 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" event={"ID":"07c45a00-0b6d-44ae-bf00-566fa5d81f4e","Type":"ContainerStarted","Data":"b0a0292c6e738082442221c985f83ce88479a147bcbea69613c81727beb18e90"} Nov 28 17:33:51 crc kubenswrapper[4884]: I1128 17:33:51.550563 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" podStartSLOduration=2.027997637 podStartE2EDuration="2.550539365s" podCreationTimestamp="2025-11-28 17:33:49 +0000 UTC" firstStartedPulling="2025-11-28 17:33:50.461384916 +0000 UTC m=+8070.024168717" lastFinishedPulling="2025-11-28 17:33:50.983926644 +0000 UTC m=+8070.546710445" observedRunningTime="2025-11-28 17:33:51.538511869 +0000 UTC m=+8071.101295680" watchObservedRunningTime="2025-11-28 17:33:51.550539365 +0000 UTC m=+8071.113323166" Nov 28 17:33:59 crc kubenswrapper[4884]: I1128 17:33:59.688435 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:33:59 crc kubenswrapper[4884]: E1128 17:33:59.689431 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:34:14 crc kubenswrapper[4884]: I1128 17:34:14.688446 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:34:14 crc kubenswrapper[4884]: E1128 17:34:14.689505 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:34:28 crc kubenswrapper[4884]: I1128 17:34:28.688829 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:34:28 crc kubenswrapper[4884]: E1128 17:34:28.689651 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:34:41 crc kubenswrapper[4884]: I1128 17:34:41.689392 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:34:41 crc kubenswrapper[4884]: E1128 17:34:41.690763 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:34:55 crc kubenswrapper[4884]: I1128 17:34:55.689200 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:34:55 crc kubenswrapper[4884]: E1128 17:34:55.690147 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:35:10 crc kubenswrapper[4884]: I1128 17:35:10.697644 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:35:10 crc kubenswrapper[4884]: E1128 17:35:10.698648 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:35:24 crc kubenswrapper[4884]: I1128 17:35:24.688546 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:35:24 crc kubenswrapper[4884]: E1128 17:35:24.689589 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:35:37 crc kubenswrapper[4884]: I1128 17:35:37.689704 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:35:37 crc kubenswrapper[4884]: E1128 17:35:37.690453 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.005906 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2g59m"] Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.009912 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.024702 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2g59m"] Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.110855 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-catalog-content\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.110941 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrpqw\" (UniqueName: \"kubernetes.io/projected/cc68e86d-b96f-4a47-8e77-f80444b72115-kube-api-access-qrpqw\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.110965 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-utilities\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.212861 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-catalog-content\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.212959 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrpqw\" (UniqueName: \"kubernetes.io/projected/cc68e86d-b96f-4a47-8e77-f80444b72115-kube-api-access-qrpqw\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.212996 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-utilities\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.213405 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-catalog-content\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.213493 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-utilities\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.235697 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrpqw\" (UniqueName: \"kubernetes.io/projected/cc68e86d-b96f-4a47-8e77-f80444b72115-kube-api-access-qrpqw\") pod \"redhat-marketplace-2g59m\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.342655 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:44 crc kubenswrapper[4884]: I1128 17:35:44.850811 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2g59m"] Nov 28 17:35:45 crc kubenswrapper[4884]: I1128 17:35:45.729627 4884 generic.go:334] "Generic (PLEG): container finished" podID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerID="c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9" exitCode=0 Nov 28 17:35:45 crc kubenswrapper[4884]: I1128 17:35:45.729896 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2g59m" event={"ID":"cc68e86d-b96f-4a47-8e77-f80444b72115","Type":"ContainerDied","Data":"c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9"} Nov 28 17:35:45 crc kubenswrapper[4884]: I1128 17:35:45.729930 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2g59m" event={"ID":"cc68e86d-b96f-4a47-8e77-f80444b72115","Type":"ContainerStarted","Data":"41e9034308e37196c96611c582850caffc96c41cadaf0c7b4ddb10d66311fcdf"} Nov 28 17:35:47 crc kubenswrapper[4884]: I1128 17:35:47.752601 4884 generic.go:334] "Generic (PLEG): container finished" podID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerID="a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75" exitCode=0 Nov 28 17:35:47 crc kubenswrapper[4884]: I1128 17:35:47.752663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2g59m" event={"ID":"cc68e86d-b96f-4a47-8e77-f80444b72115","Type":"ContainerDied","Data":"a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75"} Nov 28 17:35:48 crc kubenswrapper[4884]: I1128 17:35:48.766559 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2g59m" event={"ID":"cc68e86d-b96f-4a47-8e77-f80444b72115","Type":"ContainerStarted","Data":"f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0"} Nov 28 17:35:48 crc kubenswrapper[4884]: I1128 17:35:48.787286 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2g59m" podStartSLOduration=3.151426999 podStartE2EDuration="5.787263965s" podCreationTimestamp="2025-11-28 17:35:43 +0000 UTC" firstStartedPulling="2025-11-28 17:35:45.732403727 +0000 UTC m=+8185.295187528" lastFinishedPulling="2025-11-28 17:35:48.368240693 +0000 UTC m=+8187.931024494" observedRunningTime="2025-11-28 17:35:48.781660188 +0000 UTC m=+8188.344443999" watchObservedRunningTime="2025-11-28 17:35:48.787263965 +0000 UTC m=+8188.350047766" Nov 28 17:35:50 crc kubenswrapper[4884]: I1128 17:35:50.700715 4884 scope.go:117] "RemoveContainer" 
containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:35:50 crc kubenswrapper[4884]: E1128 17:35:50.701446 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:35:54 crc kubenswrapper[4884]: I1128 17:35:54.343587 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:54 crc kubenswrapper[4884]: I1128 17:35:54.344812 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:54 crc kubenswrapper[4884]: I1128 17:35:54.388502 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:54 crc kubenswrapper[4884]: I1128 17:35:54.896082 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:54 crc kubenswrapper[4884]: I1128 17:35:54.947940 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2g59m"] Nov 28 17:35:56 crc kubenswrapper[4884]: I1128 17:35:56.854522 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2g59m" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="registry-server" containerID="cri-o://f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0" gracePeriod=2 Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.374646 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.509612 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-catalog-content\") pod \"cc68e86d-b96f-4a47-8e77-f80444b72115\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.509752 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrpqw\" (UniqueName: \"kubernetes.io/projected/cc68e86d-b96f-4a47-8e77-f80444b72115-kube-api-access-qrpqw\") pod \"cc68e86d-b96f-4a47-8e77-f80444b72115\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.509810 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-utilities\") pod \"cc68e86d-b96f-4a47-8e77-f80444b72115\" (UID: \"cc68e86d-b96f-4a47-8e77-f80444b72115\") " Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.510837 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-utilities" (OuterVolumeSpecName: "utilities") pod "cc68e86d-b96f-4a47-8e77-f80444b72115" (UID: "cc68e86d-b96f-4a47-8e77-f80444b72115"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.517510 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc68e86d-b96f-4a47-8e77-f80444b72115-kube-api-access-qrpqw" (OuterVolumeSpecName: "kube-api-access-qrpqw") pod "cc68e86d-b96f-4a47-8e77-f80444b72115" (UID: "cc68e86d-b96f-4a47-8e77-f80444b72115"). InnerVolumeSpecName "kube-api-access-qrpqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.533635 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc68e86d-b96f-4a47-8e77-f80444b72115" (UID: "cc68e86d-b96f-4a47-8e77-f80444b72115"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.612631 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.612683 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrpqw\" (UniqueName: \"kubernetes.io/projected/cc68e86d-b96f-4a47-8e77-f80444b72115-kube-api-access-qrpqw\") on node \"crc\" DevicePath \"\"" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.612700 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc68e86d-b96f-4a47-8e77-f80444b72115-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.867129 4884 generic.go:334] "Generic (PLEG): container finished" podID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerID="f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0" exitCode=0 Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.867174 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2g59m" event={"ID":"cc68e86d-b96f-4a47-8e77-f80444b72115","Type":"ContainerDied","Data":"f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0"} Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.867200 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2g59m" event={"ID":"cc68e86d-b96f-4a47-8e77-f80444b72115","Type":"ContainerDied","Data":"41e9034308e37196c96611c582850caffc96c41cadaf0c7b4ddb10d66311fcdf"} Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.867195 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2g59m" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.867244 4884 scope.go:117] "RemoveContainer" containerID="f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.900017 4884 scope.go:117] "RemoveContainer" containerID="a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.914118 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2g59m"] Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.930230 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2g59m"] Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.937682 4884 scope.go:117] "RemoveContainer" containerID="c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.978958 4884 scope.go:117] "RemoveContainer" containerID="f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0" Nov 28 17:35:57 crc kubenswrapper[4884]: E1128 17:35:57.979383 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0\": container with ID starting with f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0 not found: ID does not exist" containerID="f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.979430 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0"} err="failed to get container status \"f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0\": rpc error: code = NotFound desc = could not find container \"f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0\": container with ID starting with f6390e794ffddbf87af46823012469db8404f10b20c49933dca64ba274b925f0 not found: ID does not exist" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.979455 4884 scope.go:117] "RemoveContainer" containerID="a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75" Nov 28 17:35:57 crc kubenswrapper[4884]: E1128 17:35:57.979798 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75\": container with ID starting with a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75 not found: ID does not exist" containerID="a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.979836 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75"} err="failed to get container status \"a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75\": rpc error: code = NotFound desc = could not find container \"a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75\": container with ID starting with a13f0844f40b20d052064291726778d204d72ff44ebad8eb5d512dd381391e75 not found: ID does not exist" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.979848 4884 scope.go:117] "RemoveContainer" 
containerID="c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9" Nov 28 17:35:57 crc kubenswrapper[4884]: E1128 17:35:57.980294 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9\": container with ID starting with c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9 not found: ID does not exist" containerID="c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9" Nov 28 17:35:57 crc kubenswrapper[4884]: I1128 17:35:57.980340 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9"} err="failed to get container status \"c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9\": rpc error: code = NotFound desc = could not find container \"c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9\": container with ID starting with c5c21e4e3e5e0672710595f6b5736f9ba23f711010a64de3b3636b07e00267e9 not found: ID does not exist" Nov 28 17:35:58 crc kubenswrapper[4884]: I1128 17:35:58.700712 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" path="/var/lib/kubelet/pods/cc68e86d-b96f-4a47-8e77-f80444b72115/volumes" Nov 28 17:36:01 crc kubenswrapper[4884]: I1128 17:36:01.689254 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:36:02 crc kubenswrapper[4884]: I1128 17:36:02.917443 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"05c307f0c62fc356e125aeacd2b5437f8886e4a13432417ceae6b5d02d400231"} Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.094505 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w8wsq"] Nov 28 17:37:32 crc kubenswrapper[4884]: E1128 17:37:32.095531 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="extract-content" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.095544 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="extract-content" Nov 28 17:37:32 crc kubenswrapper[4884]: E1128 17:37:32.095563 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="extract-utilities" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.095571 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="extract-utilities" Nov 28 17:37:32 crc kubenswrapper[4884]: E1128 17:37:32.095597 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="registry-server" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.095604 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="registry-server" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.095848 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc68e86d-b96f-4a47-8e77-f80444b72115" containerName="registry-server" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.097343 4884 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.163051 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w8wsq"] Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.254871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-utilities\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.254997 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrpg6\" (UniqueName: \"kubernetes.io/projected/93de4cb4-885a-4e97-9e90-0fa4bb563e25-kube-api-access-rrpg6\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.255031 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-catalog-content\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.356699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-utilities\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.357212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrpg6\" (UniqueName: \"kubernetes.io/projected/93de4cb4-885a-4e97-9e90-0fa4bb563e25-kube-api-access-rrpg6\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.357323 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-utilities\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.357495 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-catalog-content\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.357726 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-catalog-content\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.390939 
4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrpg6\" (UniqueName: \"kubernetes.io/projected/93de4cb4-885a-4e97-9e90-0fa4bb563e25-kube-api-access-rrpg6\") pod \"certified-operators-w8wsq\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.456331 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:32 crc kubenswrapper[4884]: I1128 17:37:32.978800 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w8wsq"] Nov 28 17:37:33 crc kubenswrapper[4884]: I1128 17:37:33.910651 4884 generic.go:334] "Generic (PLEG): container finished" podID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerID="36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd" exitCode=0 Nov 28 17:37:33 crc kubenswrapper[4884]: I1128 17:37:33.910721 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8wsq" event={"ID":"93de4cb4-885a-4e97-9e90-0fa4bb563e25","Type":"ContainerDied","Data":"36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd"} Nov 28 17:37:33 crc kubenswrapper[4884]: I1128 17:37:33.910978 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8wsq" event={"ID":"93de4cb4-885a-4e97-9e90-0fa4bb563e25","Type":"ContainerStarted","Data":"9693386879a42ef29bd7f8090671e3a0c90da4977c3112f91ba058d4af28ca86"} Nov 28 17:37:35 crc kubenswrapper[4884]: I1128 17:37:35.942826 4884 generic.go:334] "Generic (PLEG): container finished" podID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerID="75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97" exitCode=0 Nov 28 17:37:35 crc kubenswrapper[4884]: I1128 17:37:35.943021 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8wsq" event={"ID":"93de4cb4-885a-4e97-9e90-0fa4bb563e25","Type":"ContainerDied","Data":"75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97"} Nov 28 17:37:36 crc kubenswrapper[4884]: I1128 17:37:36.953894 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8wsq" event={"ID":"93de4cb4-885a-4e97-9e90-0fa4bb563e25","Type":"ContainerStarted","Data":"536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3"} Nov 28 17:37:36 crc kubenswrapper[4884]: I1128 17:37:36.973654 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w8wsq" podStartSLOduration=2.266913045 podStartE2EDuration="4.973630527s" podCreationTimestamp="2025-11-28 17:37:32 +0000 UTC" firstStartedPulling="2025-11-28 17:37:33.913518418 +0000 UTC m=+8293.476302239" lastFinishedPulling="2025-11-28 17:37:36.62023592 +0000 UTC m=+8296.183019721" observedRunningTime="2025-11-28 17:37:36.970111681 +0000 UTC m=+8296.532895482" watchObservedRunningTime="2025-11-28 17:37:36.973630527 +0000 UTC m=+8296.536414328" Nov 28 17:37:42 crc kubenswrapper[4884]: I1128 17:37:42.457005 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:42 crc kubenswrapper[4884]: I1128 17:37:42.457879 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:42 crc 
kubenswrapper[4884]: I1128 17:37:42.511161 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:43 crc kubenswrapper[4884]: I1128 17:37:43.069220 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:43 crc kubenswrapper[4884]: I1128 17:37:43.130295 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w8wsq"] Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.038404 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w8wsq" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="registry-server" containerID="cri-o://536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3" gracePeriod=2 Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.576497 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.705597 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-utilities\") pod \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.705944 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-catalog-content\") pod \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.706168 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrpg6\" (UniqueName: \"kubernetes.io/projected/93de4cb4-885a-4e97-9e90-0fa4bb563e25-kube-api-access-rrpg6\") pod \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\" (UID: \"93de4cb4-885a-4e97-9e90-0fa4bb563e25\") " Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.708427 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-utilities" (OuterVolumeSpecName: "utilities") pod "93de4cb4-885a-4e97-9e90-0fa4bb563e25" (UID: "93de4cb4-885a-4e97-9e90-0fa4bb563e25"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.727062 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93de4cb4-885a-4e97-9e90-0fa4bb563e25-kube-api-access-rrpg6" (OuterVolumeSpecName: "kube-api-access-rrpg6") pod "93de4cb4-885a-4e97-9e90-0fa4bb563e25" (UID: "93de4cb4-885a-4e97-9e90-0fa4bb563e25"). InnerVolumeSpecName "kube-api-access-rrpg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.789795 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93de4cb4-885a-4e97-9e90-0fa4bb563e25" (UID: "93de4cb4-885a-4e97-9e90-0fa4bb563e25"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.810004 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrpg6\" (UniqueName: \"kubernetes.io/projected/93de4cb4-885a-4e97-9e90-0fa4bb563e25-kube-api-access-rrpg6\") on node \"crc\" DevicePath \"\"" Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.810043 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:37:45 crc kubenswrapper[4884]: I1128 17:37:45.810056 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93de4cb4-885a-4e97-9e90-0fa4bb563e25-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.050887 4884 generic.go:334] "Generic (PLEG): container finished" podID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerID="536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3" exitCode=0 Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.050945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8wsq" event={"ID":"93de4cb4-885a-4e97-9e90-0fa4bb563e25","Type":"ContainerDied","Data":"536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3"} Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.050968 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w8wsq" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.050990 4884 scope.go:117] "RemoveContainer" containerID="536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.050979 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w8wsq" event={"ID":"93de4cb4-885a-4e97-9e90-0fa4bb563e25","Type":"ContainerDied","Data":"9693386879a42ef29bd7f8090671e3a0c90da4977c3112f91ba058d4af28ca86"} Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.084063 4884 scope.go:117] "RemoveContainer" containerID="75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.095967 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w8wsq"] Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.108401 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w8wsq"] Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.113742 4884 scope.go:117] "RemoveContainer" containerID="36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.169916 4884 scope.go:117] "RemoveContainer" containerID="536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3" Nov 28 17:37:46 crc kubenswrapper[4884]: E1128 17:37:46.170427 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3\": container with ID starting with 536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3 not found: ID does not exist" containerID="536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.170461 
4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3"} err="failed to get container status \"536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3\": rpc error: code = NotFound desc = could not find container \"536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3\": container with ID starting with 536c6eb36e7e153ca941dff66ab3fd1836435948a9b3bb43e8956dd7de9d14f3 not found: ID does not exist" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.170487 4884 scope.go:117] "RemoveContainer" containerID="75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97" Nov 28 17:37:46 crc kubenswrapper[4884]: E1128 17:37:46.170718 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97\": container with ID starting with 75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97 not found: ID does not exist" containerID="75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.170752 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97"} err="failed to get container status \"75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97\": rpc error: code = NotFound desc = could not find container \"75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97\": container with ID starting with 75e4541f23d73422edf3d44fff173066f4e7040d82bb96a02f7369cbc92b0a97 not found: ID does not exist" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.170769 4884 scope.go:117] "RemoveContainer" containerID="36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd" Nov 28 17:37:46 crc kubenswrapper[4884]: E1128 17:37:46.171336 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd\": container with ID starting with 36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd not found: ID does not exist" containerID="36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.171365 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd"} err="failed to get container status \"36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd\": rpc error: code = NotFound desc = could not find container \"36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd\": container with ID starting with 36acf38c517b00d9de7acb929ae5244ab3cf0f08a308992439027ee235bcd2fd not found: ID does not exist" Nov 28 17:37:46 crc kubenswrapper[4884]: I1128 17:37:46.702517 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" path="/var/lib/kubelet/pods/93de4cb4-885a-4e97-9e90-0fa4bb563e25/volumes" Nov 28 17:38:21 crc kubenswrapper[4884]: I1128 17:38:21.243227 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:38:21 crc kubenswrapper[4884]: I1128 17:38:21.244891 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:38:40 crc kubenswrapper[4884]: I1128 17:38:40.644641 4884 generic.go:334] "Generic (PLEG): container finished" podID="07c45a00-0b6d-44ae-bf00-566fa5d81f4e" containerID="b0a0292c6e738082442221c985f83ce88479a147bcbea69613c81727beb18e90" exitCode=0 Nov 28 17:38:40 crc kubenswrapper[4884]: I1128 17:38:40.644721 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" event={"ID":"07c45a00-0b6d-44ae-bf00-566fa5d81f4e","Type":"ContainerDied","Data":"b0a0292c6e738082442221c985f83ce88479a147bcbea69613c81727beb18e90"} Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.186044 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.274801 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-secret-0\") pod \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.274839 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ssh-key\") pod \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.274890 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ceph\") pod \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.275015 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ch5j\" (UniqueName: \"kubernetes.io/projected/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-kube-api-access-5ch5j\") pod \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.275039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-combined-ca-bundle\") pod \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.275072 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-inventory\") pod \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\" (UID: \"07c45a00-0b6d-44ae-bf00-566fa5d81f4e\") " Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.281320 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ceph" 
(OuterVolumeSpecName: "ceph") pod "07c45a00-0b6d-44ae-bf00-566fa5d81f4e" (UID: "07c45a00-0b6d-44ae-bf00-566fa5d81f4e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.281787 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "07c45a00-0b6d-44ae-bf00-566fa5d81f4e" (UID: "07c45a00-0b6d-44ae-bf00-566fa5d81f4e"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.282640 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-kube-api-access-5ch5j" (OuterVolumeSpecName: "kube-api-access-5ch5j") pod "07c45a00-0b6d-44ae-bf00-566fa5d81f4e" (UID: "07c45a00-0b6d-44ae-bf00-566fa5d81f4e"). InnerVolumeSpecName "kube-api-access-5ch5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.306392 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "07c45a00-0b6d-44ae-bf00-566fa5d81f4e" (UID: "07c45a00-0b6d-44ae-bf00-566fa5d81f4e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.313065 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "07c45a00-0b6d-44ae-bf00-566fa5d81f4e" (UID: "07c45a00-0b6d-44ae-bf00-566fa5d81f4e"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.321145 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-inventory" (OuterVolumeSpecName: "inventory") pod "07c45a00-0b6d-44ae-bf00-566fa5d81f4e" (UID: "07c45a00-0b6d-44ae-bf00-566fa5d81f4e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.378551 4884 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.378592 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.378605 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.378620 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ch5j\" (UniqueName: \"kubernetes.io/projected/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-kube-api-access-5ch5j\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.378638 4884 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.378656 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c45a00-0b6d-44ae-bf00-566fa5d81f4e-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.665851 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" event={"ID":"07c45a00-0b6d-44ae-bf00-566fa5d81f4e","Type":"ContainerDied","Data":"f2b2fac80f4f79aa1e71ef44280a8c994332c334913ed0c6a1851f172a201a68"} Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.666223 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2b2fac80f4f79aa1e71ef44280a8c994332c334913ed0c6a1851f172a201a68" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.665976 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-l46sp" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.768868 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-kgz9v"] Nov 28 17:38:42 crc kubenswrapper[4884]: E1128 17:38:42.769366 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07c45a00-0b6d-44ae-bf00-566fa5d81f4e" containerName="libvirt-openstack-openstack-cell1" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.769384 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="07c45a00-0b6d-44ae-bf00-566fa5d81f4e" containerName="libvirt-openstack-openstack-cell1" Nov 28 17:38:42 crc kubenswrapper[4884]: E1128 17:38:42.769420 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="registry-server" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.769427 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="registry-server" Nov 28 17:38:42 crc kubenswrapper[4884]: E1128 17:38:42.769440 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="extract-utilities" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.769446 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="extract-utilities" Nov 28 17:38:42 crc kubenswrapper[4884]: E1128 17:38:42.769457 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="extract-content" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.769463 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="extract-content" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.769688 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="07c45a00-0b6d-44ae-bf00-566fa5d81f4e" containerName="libvirt-openstack-openstack-cell1" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.769714 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="93de4cb4-885a-4e97-9e90-0fa4bb563e25" containerName="registry-server" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.770470 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.777215 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.777336 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.777283 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.777299 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.777323 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.777707 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.780193 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.784920 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-kgz9v"] Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.891977 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892057 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892131 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892226 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892299 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: 
\"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-inventory\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892471 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892543 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892613 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892748 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxv45\" (UniqueName: \"kubernetes.io/projected/9b18111a-9199-4f55-8a8b-a740c1fec6dd-kube-api-access-vxv45\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.892870 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ceph\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995456 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995533 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: 
\"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995572 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-inventory\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995656 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995722 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995881 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxv45\" (UniqueName: \"kubernetes.io/projected/9b18111a-9199-4f55-8a8b-a740c1fec6dd-kube-api-access-vxv45\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.995978 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ceph\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.996042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.996121 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: 
\"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.996174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.996448 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.996477 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.999724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:42 crc kubenswrapper[4884]: I1128 17:38:42.999757 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.001050 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.001237 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.001303 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc 
kubenswrapper[4884]: I1128 17:38:43.002758 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.003611 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-inventory\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.003989 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ceph\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.019228 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxv45\" (UniqueName: \"kubernetes.io/projected/9b18111a-9199-4f55-8a8b-a740c1fec6dd-kube-api-access-vxv45\") pod \"nova-cell1-openstack-openstack-cell1-kgz9v\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.087977 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.632351 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-kgz9v"] Nov 28 17:38:43 crc kubenswrapper[4884]: I1128 17:38:43.678581 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" event={"ID":"9b18111a-9199-4f55-8a8b-a740c1fec6dd","Type":"ContainerStarted","Data":"4700c08d7925b053c98ca32c56ec453b96125b2500552a76f21550a407cdb867"} Nov 28 17:38:44 crc kubenswrapper[4884]: I1128 17:38:44.709906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" event={"ID":"9b18111a-9199-4f55-8a8b-a740c1fec6dd","Type":"ContainerStarted","Data":"5fbdf537ee619d07080d88796322bda0eeeadcc60a2a3af9c4b183b5c40f8b7f"} Nov 28 17:38:44 crc kubenswrapper[4884]: I1128 17:38:44.739540 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" podStartSLOduration=2.039841609 podStartE2EDuration="2.739517718s" podCreationTimestamp="2025-11-28 17:38:42 +0000 UTC" firstStartedPulling="2025-11-28 17:38:43.631971675 +0000 UTC m=+8363.194755476" lastFinishedPulling="2025-11-28 17:38:44.331647784 +0000 UTC m=+8363.894431585" observedRunningTime="2025-11-28 17:38:44.717739004 +0000 UTC m=+8364.280522805" watchObservedRunningTime="2025-11-28 17:38:44.739517718 +0000 UTC m=+8364.302301529" Nov 28 17:38:51 crc kubenswrapper[4884]: I1128 17:38:51.243109 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:38:51 crc kubenswrapper[4884]: I1128 17:38:51.243907 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:39:21 crc kubenswrapper[4884]: I1128 17:39:21.243757 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:39:21 crc kubenswrapper[4884]: I1128 17:39:21.244406 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:39:21 crc kubenswrapper[4884]: I1128 17:39:21.244474 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:39:21 crc kubenswrapper[4884]: I1128 17:39:21.245386 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"05c307f0c62fc356e125aeacd2b5437f8886e4a13432417ceae6b5d02d400231"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:39:21 crc kubenswrapper[4884]: I1128 17:39:21.245493 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://05c307f0c62fc356e125aeacd2b5437f8886e4a13432417ceae6b5d02d400231" gracePeriod=600 Nov 28 17:39:22 crc kubenswrapper[4884]: I1128 17:39:22.094837 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="05c307f0c62fc356e125aeacd2b5437f8886e4a13432417ceae6b5d02d400231" exitCode=0 Nov 28 17:39:22 crc kubenswrapper[4884]: I1128 17:39:22.094862 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"05c307f0c62fc356e125aeacd2b5437f8886e4a13432417ceae6b5d02d400231"} Nov 28 17:39:22 crc kubenswrapper[4884]: I1128 17:39:22.095435 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42"} Nov 28 17:39:22 crc kubenswrapper[4884]: I1128 17:39:22.095459 4884 scope.go:117] "RemoveContainer" containerID="b6897c95bcadc1cb50302868db77914cf217ac2c52176e81c32c7a09d2a55d53" Nov 28 17:41:21 crc kubenswrapper[4884]: I1128 17:41:21.242979 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:41:21 crc kubenswrapper[4884]: I1128 17:41:21.243930 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:41:51 crc kubenswrapper[4884]: I1128 17:41:51.243184 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:41:51 crc kubenswrapper[4884]: I1128 17:41:51.245544 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:42:01 crc kubenswrapper[4884]: I1128 17:42:01.786192 4884 generic.go:334] "Generic (PLEG): container finished" podID="9b18111a-9199-4f55-8a8b-a740c1fec6dd" containerID="5fbdf537ee619d07080d88796322bda0eeeadcc60a2a3af9c4b183b5c40f8b7f" exitCode=0 Nov 28 17:42:01 crc kubenswrapper[4884]: I1128 17:42:01.786236 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" event={"ID":"9b18111a-9199-4f55-8a8b-a740c1fec6dd","Type":"ContainerDied","Data":"5fbdf537ee619d07080d88796322bda0eeeadcc60a2a3af9c4b183b5c40f8b7f"} Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.559864 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.720463 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-1\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.721518 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-0\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.721627 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-0\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722021 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-inventory\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722057 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-0\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722083 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-combined-ca-bundle\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722207 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ceph\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722233 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-1\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722257 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ssh-key\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722352 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-1\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.722413 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxv45\" (UniqueName: \"kubernetes.io/projected/9b18111a-9199-4f55-8a8b-a740c1fec6dd-kube-api-access-vxv45\") pod \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\" (UID: \"9b18111a-9199-4f55-8a8b-a740c1fec6dd\") " Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.733674 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b18111a-9199-4f55-8a8b-a740c1fec6dd-kube-api-access-vxv45" (OuterVolumeSpecName: "kube-api-access-vxv45") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "kube-api-access-vxv45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.734223 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.735564 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ceph" (OuterVolumeSpecName: "ceph") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.754533 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.755378 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.757845 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.763131 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.764013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.769054 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.769779 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.769879 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-inventory" (OuterVolumeSpecName: "inventory") pod "9b18111a-9199-4f55-8a8b-a740c1fec6dd" (UID: "9b18111a-9199-4f55-8a8b-a740c1fec6dd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.807739 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" event={"ID":"9b18111a-9199-4f55-8a8b-a740c1fec6dd","Type":"ContainerDied","Data":"4700c08d7925b053c98ca32c56ec453b96125b2500552a76f21550a407cdb867"} Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.807778 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4700c08d7925b053c98ca32c56ec453b96125b2500552a76f21550a407cdb867" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.807834 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-kgz9v" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.825979 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826020 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826033 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826046 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826059 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826072 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826104 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826118 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxv45\" (UniqueName: \"kubernetes.io/projected/9b18111a-9199-4f55-8a8b-a740c1fec6dd-kube-api-access-vxv45\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826129 4884 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826139 4884 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.826149 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/9b18111a-9199-4f55-8a8b-a740c1fec6dd-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.907736 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-4tvmx"] Nov 28 17:42:03 crc kubenswrapper[4884]: E1128 17:42:03.908500 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b18111a-9199-4f55-8a8b-a740c1fec6dd" containerName="nova-cell1-openstack-openstack-cell1" Nov 28 
17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.908578 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b18111a-9199-4f55-8a8b-a740c1fec6dd" containerName="nova-cell1-openstack-openstack-cell1" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.908809 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b18111a-9199-4f55-8a8b-a740c1fec6dd" containerName="nova-cell1-openstack-openstack-cell1" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.909892 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.913855 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.915810 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.916404 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.916415 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.916416 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:42:03 crc kubenswrapper[4884]: I1128 17:42:03.920999 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-4tvmx"] Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.042544 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.042798 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjp4k\" (UniqueName: \"kubernetes.io/projected/1c9d690e-b787-4b1a-a760-83f68d6a69a0-kube-api-access-tjp4k\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.042847 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.042950 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ssh-key\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.042977 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-inventory\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.043030 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceph\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.043228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.043314 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.144732 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145188 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjp4k\" (UniqueName: \"kubernetes.io/projected/1c9d690e-b787-4b1a-a760-83f68d6a69a0-kube-api-access-tjp4k\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145218 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145278 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ssh-key\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145303 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-inventory\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145337 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceph\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145429 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.145471 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.148625 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.148886 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceph\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.149076 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.159993 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-inventory\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.160311 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ssh-key\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " 
pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.162647 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjp4k\" (UniqueName: \"kubernetes.io/projected/1c9d690e-b787-4b1a-a760-83f68d6a69a0-kube-api-access-tjp4k\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.164486 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.192724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-4tvmx\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.244614 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.810963 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-4tvmx"] Nov 28 17:42:04 crc kubenswrapper[4884]: I1128 17:42:04.829812 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:42:05 crc kubenswrapper[4884]: I1128 17:42:05.833919 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" event={"ID":"1c9d690e-b787-4b1a-a760-83f68d6a69a0","Type":"ContainerStarted","Data":"15004727253bab5535b8c407b98087d67f5dbc29d89947c20ae90829206b15e1"} Nov 28 17:42:05 crc kubenswrapper[4884]: I1128 17:42:05.834494 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" event={"ID":"1c9d690e-b787-4b1a-a760-83f68d6a69a0","Type":"ContainerStarted","Data":"54534c72a30a734b8d929649671d94792e62502b1657cdbd8b56ee213949af3e"} Nov 28 17:42:05 crc kubenswrapper[4884]: I1128 17:42:05.860475 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" podStartSLOduration=2.193849692 podStartE2EDuration="2.860453521s" podCreationTimestamp="2025-11-28 17:42:03 +0000 UTC" firstStartedPulling="2025-11-28 17:42:04.829504896 +0000 UTC m=+8564.392288707" lastFinishedPulling="2025-11-28 17:42:05.496108735 +0000 UTC m=+8565.058892536" observedRunningTime="2025-11-28 17:42:05.850593119 +0000 UTC m=+8565.413376920" watchObservedRunningTime="2025-11-28 17:42:05.860453521 +0000 UTC m=+8565.423237332" Nov 28 17:42:21 crc kubenswrapper[4884]: I1128 17:42:21.243178 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:42:21 
crc kubenswrapper[4884]: I1128 17:42:21.243743 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:42:21 crc kubenswrapper[4884]: I1128 17:42:21.243851 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:42:21 crc kubenswrapper[4884]: I1128 17:42:21.244662 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:42:21 crc kubenswrapper[4884]: I1128 17:42:21.244729 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" gracePeriod=600 Nov 28 17:42:21 crc kubenswrapper[4884]: E1128 17:42:21.369892 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:42:22 crc kubenswrapper[4884]: I1128 17:42:22.006069 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" exitCode=0 Nov 28 17:42:22 crc kubenswrapper[4884]: I1128 17:42:22.006141 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42"} Nov 28 17:42:22 crc kubenswrapper[4884]: I1128 17:42:22.006313 4884 scope.go:117] "RemoveContainer" containerID="05c307f0c62fc356e125aeacd2b5437f8886e4a13432417ceae6b5d02d400231" Nov 28 17:42:22 crc kubenswrapper[4884]: I1128 17:42:22.007175 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:42:22 crc kubenswrapper[4884]: E1128 17:42:22.007487 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:42:34 crc kubenswrapper[4884]: I1128 17:42:34.688486 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:42:34 crc 
kubenswrapper[4884]: E1128 17:42:34.690511 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:42:47 crc kubenswrapper[4884]: I1128 17:42:47.689739 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:42:47 crc kubenswrapper[4884]: E1128 17:42:47.691009 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:43:01 crc kubenswrapper[4884]: I1128 17:43:01.689072 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:43:01 crc kubenswrapper[4884]: E1128 17:43:01.689890 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:43:12 crc kubenswrapper[4884]: I1128 17:43:12.689520 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:43:12 crc kubenswrapper[4884]: E1128 17:43:12.690483 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:43:25 crc kubenswrapper[4884]: I1128 17:43:25.689244 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:43:25 crc kubenswrapper[4884]: E1128 17:43:25.690212 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:43:40 crc kubenswrapper[4884]: I1128 17:43:40.702868 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:43:40 crc kubenswrapper[4884]: E1128 17:43:40.703719 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:43:54 crc kubenswrapper[4884]: I1128 17:43:54.688139 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:43:54 crc kubenswrapper[4884]: E1128 17:43:54.689263 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.414703 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xn8pj"] Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.424293 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.447911 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xn8pj"] Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.502702 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-catalog-content\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.502772 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-utilities\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.502798 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db52m\" (UniqueName: \"kubernetes.io/projected/e8b33785-3f4d-4eb4-ba0b-70543c371300-kube-api-access-db52m\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.605878 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-catalog-content\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.606250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-utilities\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.606344 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db52m\" (UniqueName: \"kubernetes.io/projected/e8b33785-3f4d-4eb4-ba0b-70543c371300-kube-api-access-db52m\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.606718 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-catalog-content\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.606754 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-utilities\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.628931 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db52m\" (UniqueName: \"kubernetes.io/projected/e8b33785-3f4d-4eb4-ba0b-70543c371300-kube-api-access-db52m\") pod \"redhat-operators-xn8pj\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:55 crc kubenswrapper[4884]: I1128 17:43:55.754364 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:43:56 crc kubenswrapper[4884]: I1128 17:43:56.295199 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xn8pj"] Nov 28 17:43:56 crc kubenswrapper[4884]: I1128 17:43:56.970082 4884 generic.go:334] "Generic (PLEG): container finished" podID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerID="5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59" exitCode=0 Nov 28 17:43:56 crc kubenswrapper[4884]: I1128 17:43:56.970610 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerDied","Data":"5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59"} Nov 28 17:43:56 crc kubenswrapper[4884]: I1128 17:43:56.970734 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerStarted","Data":"95a6d7a4be92cb9c86d13f2f4f71e167eb75fcec3fad55456d550f0cbbfa64b3"} Nov 28 17:43:58 crc kubenswrapper[4884]: I1128 17:43:58.995104 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerStarted","Data":"b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4"} Nov 28 17:44:01 crc kubenswrapper[4884]: I1128 17:44:01.019041 4884 generic.go:334] "Generic (PLEG): container finished" podID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerID="b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4" exitCode=0 Nov 28 17:44:01 crc kubenswrapper[4884]: I1128 17:44:01.019136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" 
event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerDied","Data":"b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4"} Nov 28 17:44:03 crc kubenswrapper[4884]: I1128 17:44:03.045054 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerStarted","Data":"dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b"} Nov 28 17:44:03 crc kubenswrapper[4884]: I1128 17:44:03.067672 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xn8pj" podStartSLOduration=2.833869833 podStartE2EDuration="8.067650575s" podCreationTimestamp="2025-11-28 17:43:55 +0000 UTC" firstStartedPulling="2025-11-28 17:43:56.972041425 +0000 UTC m=+8676.534825226" lastFinishedPulling="2025-11-28 17:44:02.205822167 +0000 UTC m=+8681.768605968" observedRunningTime="2025-11-28 17:44:03.065053131 +0000 UTC m=+8682.627836932" watchObservedRunningTime="2025-11-28 17:44:03.067650575 +0000 UTC m=+8682.630434376" Nov 28 17:44:05 crc kubenswrapper[4884]: I1128 17:44:05.754685 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:44:05 crc kubenswrapper[4884]: I1128 17:44:05.756067 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:44:06 crc kubenswrapper[4884]: I1128 17:44:06.845689 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xn8pj" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="registry-server" probeResult="failure" output=< Nov 28 17:44:06 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 17:44:06 crc kubenswrapper[4884]: > Nov 28 17:44:09 crc kubenswrapper[4884]: I1128 17:44:09.688539 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:44:09 crc kubenswrapper[4884]: E1128 17:44:09.689302 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:44:15 crc kubenswrapper[4884]: I1128 17:44:15.837967 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:44:15 crc kubenswrapper[4884]: I1128 17:44:15.898035 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:44:16 crc kubenswrapper[4884]: I1128 17:44:16.080341 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xn8pj"] Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.195648 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xn8pj" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="registry-server" containerID="cri-o://dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b" gracePeriod=2 Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.654337 4884 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.794449 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-catalog-content\") pod \"e8b33785-3f4d-4eb4-ba0b-70543c371300\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.794594 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-db52m\" (UniqueName: \"kubernetes.io/projected/e8b33785-3f4d-4eb4-ba0b-70543c371300-kube-api-access-db52m\") pod \"e8b33785-3f4d-4eb4-ba0b-70543c371300\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.794815 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-utilities\") pod \"e8b33785-3f4d-4eb4-ba0b-70543c371300\" (UID: \"e8b33785-3f4d-4eb4-ba0b-70543c371300\") " Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.795557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-utilities" (OuterVolumeSpecName: "utilities") pod "e8b33785-3f4d-4eb4-ba0b-70543c371300" (UID: "e8b33785-3f4d-4eb4-ba0b-70543c371300"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.798789 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.799499 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8b33785-3f4d-4eb4-ba0b-70543c371300-kube-api-access-db52m" (OuterVolumeSpecName: "kube-api-access-db52m") pod "e8b33785-3f4d-4eb4-ba0b-70543c371300" (UID: "e8b33785-3f4d-4eb4-ba0b-70543c371300"). InnerVolumeSpecName "kube-api-access-db52m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.900263 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-db52m\" (UniqueName: \"kubernetes.io/projected/e8b33785-3f4d-4eb4-ba0b-70543c371300-kube-api-access-db52m\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:17 crc kubenswrapper[4884]: I1128 17:44:17.915793 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e8b33785-3f4d-4eb4-ba0b-70543c371300" (UID: "e8b33785-3f4d-4eb4-ba0b-70543c371300"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.001739 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e8b33785-3f4d-4eb4-ba0b-70543c371300-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.207050 4884 generic.go:334] "Generic (PLEG): container finished" podID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerID="dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b" exitCode=0 Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.207124 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerDied","Data":"dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b"} Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.207211 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xn8pj" event={"ID":"e8b33785-3f4d-4eb4-ba0b-70543c371300","Type":"ContainerDied","Data":"95a6d7a4be92cb9c86d13f2f4f71e167eb75fcec3fad55456d550f0cbbfa64b3"} Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.207252 4884 scope.go:117] "RemoveContainer" containerID="dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.207297 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xn8pj" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.241810 4884 scope.go:117] "RemoveContainer" containerID="b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.256856 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xn8pj"] Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.265134 4884 scope.go:117] "RemoveContainer" containerID="5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.268740 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xn8pj"] Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.313563 4884 scope.go:117] "RemoveContainer" containerID="dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b" Nov 28 17:44:18 crc kubenswrapper[4884]: E1128 17:44:18.313949 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b\": container with ID starting with dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b not found: ID does not exist" containerID="dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.313983 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b"} err="failed to get container status \"dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b\": rpc error: code = NotFound desc = could not find container \"dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b\": container with ID starting with dbb10914633b2dc5856c6b7319f2e806403a35a973ed18963adf05cc5754cd3b not found: ID does not exist" Nov 28 17:44:18 crc 
kubenswrapper[4884]: I1128 17:44:18.314004 4884 scope.go:117] "RemoveContainer" containerID="b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4" Nov 28 17:44:18 crc kubenswrapper[4884]: E1128 17:44:18.314277 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4\": container with ID starting with b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4 not found: ID does not exist" containerID="b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.314300 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4"} err="failed to get container status \"b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4\": rpc error: code = NotFound desc = could not find container \"b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4\": container with ID starting with b3c0df0a9b8019178d2195d2fa7cbcb8cd121ad853919e6043cdd51cf7136ec4 not found: ID does not exist" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.314317 4884 scope.go:117] "RemoveContainer" containerID="5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59" Nov 28 17:44:18 crc kubenswrapper[4884]: E1128 17:44:18.314548 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59\": container with ID starting with 5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59 not found: ID does not exist" containerID="5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.314568 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59"} err="failed to get container status \"5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59\": rpc error: code = NotFound desc = could not find container \"5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59\": container with ID starting with 5dd4a189055992a88c308a402cf079c4bc2c5b95a5701abd91fd8d8b26f05a59 not found: ID does not exist" Nov 28 17:44:18 crc kubenswrapper[4884]: I1128 17:44:18.700845 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" path="/var/lib/kubelet/pods/e8b33785-3f4d-4eb4-ba0b-70543c371300/volumes" Nov 28 17:44:21 crc kubenswrapper[4884]: I1128 17:44:21.688509 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:44:21 crc kubenswrapper[4884]: E1128 17:44:21.689051 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.296889 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4mfmn"] Nov 28 
17:44:24 crc kubenswrapper[4884]: E1128 17:44:24.298277 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="extract-content" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.298299 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="extract-content" Nov 28 17:44:24 crc kubenswrapper[4884]: E1128 17:44:24.298362 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="registry-server" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.298389 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="registry-server" Nov 28 17:44:24 crc kubenswrapper[4884]: E1128 17:44:24.298442 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="extract-utilities" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.298456 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="extract-utilities" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.298845 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8b33785-3f4d-4eb4-ba0b-70543c371300" containerName="registry-server" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.303065 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.322195 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4mfmn"] Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.347810 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/596c2102-9c67-495b-8080-beed3c62c0e1-utilities\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.347939 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76l6f\" (UniqueName: \"kubernetes.io/projected/596c2102-9c67-495b-8080-beed3c62c0e1-kube-api-access-76l6f\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.348458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/596c2102-9c67-495b-8080-beed3c62c0e1-catalog-content\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.449454 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/596c2102-9c67-495b-8080-beed3c62c0e1-catalog-content\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.449525 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/596c2102-9c67-495b-8080-beed3c62c0e1-utilities\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.449551 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76l6f\" (UniqueName: \"kubernetes.io/projected/596c2102-9c67-495b-8080-beed3c62c0e1-kube-api-access-76l6f\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.449976 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/596c2102-9c67-495b-8080-beed3c62c0e1-catalog-content\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.450028 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/596c2102-9c67-495b-8080-beed3c62c0e1-utilities\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.470499 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76l6f\" (UniqueName: \"kubernetes.io/projected/596c2102-9c67-495b-8080-beed3c62c0e1-kube-api-access-76l6f\") pod \"community-operators-4mfmn\" (UID: \"596c2102-9c67-495b-8080-beed3c62c0e1\") " pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:24 crc kubenswrapper[4884]: I1128 17:44:24.625651 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:25 crc kubenswrapper[4884]: I1128 17:44:25.167190 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4mfmn"] Nov 28 17:44:25 crc kubenswrapper[4884]: I1128 17:44:25.281546 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4mfmn" event={"ID":"596c2102-9c67-495b-8080-beed3c62c0e1","Type":"ContainerStarted","Data":"3aa7ac7d7bfc2eef6f3ef5a85a03ee5b4a1d7800e064f63dd77f582e9f017dec"} Nov 28 17:44:26 crc kubenswrapper[4884]: I1128 17:44:26.294128 4884 generic.go:334] "Generic (PLEG): container finished" podID="596c2102-9c67-495b-8080-beed3c62c0e1" containerID="2f915d64c401260f7743e4a9210dccc2df7800570d39c3c2ea819a13aceca1b3" exitCode=0 Nov 28 17:44:26 crc kubenswrapper[4884]: I1128 17:44:26.294204 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4mfmn" event={"ID":"596c2102-9c67-495b-8080-beed3c62c0e1","Type":"ContainerDied","Data":"2f915d64c401260f7743e4a9210dccc2df7800570d39c3c2ea819a13aceca1b3"} Nov 28 17:44:31 crc kubenswrapper[4884]: I1128 17:44:31.348056 4884 generic.go:334] "Generic (PLEG): container finished" podID="596c2102-9c67-495b-8080-beed3c62c0e1" containerID="c01fd40cf3dc93d882d408711c1afdcba91cac3cc99e40b90aba26d88b0d4642" exitCode=0 Nov 28 17:44:31 crc kubenswrapper[4884]: I1128 17:44:31.348124 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4mfmn" event={"ID":"596c2102-9c67-495b-8080-beed3c62c0e1","Type":"ContainerDied","Data":"c01fd40cf3dc93d882d408711c1afdcba91cac3cc99e40b90aba26d88b0d4642"} Nov 28 17:44:32 crc kubenswrapper[4884]: I1128 17:44:32.362429 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4mfmn" event={"ID":"596c2102-9c67-495b-8080-beed3c62c0e1","Type":"ContainerStarted","Data":"e60164fa9f8c64a6c85d17073320369a538993007c891a4e21197cd8cb36a998"} Nov 28 17:44:32 crc kubenswrapper[4884]: I1128 17:44:32.385971 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4mfmn" podStartSLOduration=2.784022446 podStartE2EDuration="8.385951508s" podCreationTimestamp="2025-11-28 17:44:24 +0000 UTC" firstStartedPulling="2025-11-28 17:44:26.296597272 +0000 UTC m=+8705.859381063" lastFinishedPulling="2025-11-28 17:44:31.898526324 +0000 UTC m=+8711.461310125" observedRunningTime="2025-11-28 17:44:32.384945884 +0000 UTC m=+8711.947729705" watchObservedRunningTime="2025-11-28 17:44:32.385951508 +0000 UTC m=+8711.948735309" Nov 28 17:44:34 crc kubenswrapper[4884]: I1128 17:44:34.626293 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:34 crc kubenswrapper[4884]: I1128 17:44:34.626780 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:34 crc kubenswrapper[4884]: I1128 17:44:34.682844 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:34 crc kubenswrapper[4884]: I1128 17:44:34.688447 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:44:34 crc kubenswrapper[4884]: E1128 17:44:34.689193 4884 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:44:44 crc kubenswrapper[4884]: I1128 17:44:44.681556 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4mfmn" Nov 28 17:44:44 crc kubenswrapper[4884]: I1128 17:44:44.779207 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4mfmn"] Nov 28 17:44:44 crc kubenswrapper[4884]: I1128 17:44:44.839697 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q82p5"] Nov 28 17:44:44 crc kubenswrapper[4884]: I1128 17:44:44.839991 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q82p5" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="registry-server" containerID="cri-o://aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb" gracePeriod=2 Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.385668 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q82p5" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.510349 4884 generic.go:334] "Generic (PLEG): container finished" podID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerID="aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb" exitCode=0 Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.510452 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q82p5" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.510510 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerDied","Data":"aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb"} Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.510726 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q82p5" event={"ID":"a0cd71d0-76ac-4f4e-8e35-f1624368814e","Type":"ContainerDied","Data":"12581e966bb90f914c4bef9fc4b1a8014bde2c79aecf085c99e019eaa389287d"} Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.510769 4884 scope.go:117] "RemoveContainer" containerID="aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.547744 4884 scope.go:117] "RemoveContainer" containerID="adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.548819 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-catalog-content\") pod \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.548900 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4prw8\" (UniqueName: \"kubernetes.io/projected/a0cd71d0-76ac-4f4e-8e35-f1624368814e-kube-api-access-4prw8\") pod \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.549543 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-utilities\") pod \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\" (UID: \"a0cd71d0-76ac-4f4e-8e35-f1624368814e\") " Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.550569 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-utilities" (OuterVolumeSpecName: "utilities") pod "a0cd71d0-76ac-4f4e-8e35-f1624368814e" (UID: "a0cd71d0-76ac-4f4e-8e35-f1624368814e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.556973 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0cd71d0-76ac-4f4e-8e35-f1624368814e-kube-api-access-4prw8" (OuterVolumeSpecName: "kube-api-access-4prw8") pod "a0cd71d0-76ac-4f4e-8e35-f1624368814e" (UID: "a0cd71d0-76ac-4f4e-8e35-f1624368814e"). InnerVolumeSpecName "kube-api-access-4prw8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.587808 4884 scope.go:117] "RemoveContainer" containerID="5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.609823 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a0cd71d0-76ac-4f4e-8e35-f1624368814e" (UID: "a0cd71d0-76ac-4f4e-8e35-f1624368814e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.652019 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.652058 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0cd71d0-76ac-4f4e-8e35-f1624368814e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.652071 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4prw8\" (UniqueName: \"kubernetes.io/projected/a0cd71d0-76ac-4f4e-8e35-f1624368814e-kube-api-access-4prw8\") on node \"crc\" DevicePath \"\"" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.688549 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:44:45 crc kubenswrapper[4884]: E1128 17:44:45.688808 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.692967 4884 scope.go:117] "RemoveContainer" containerID="aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb" Nov 28 17:44:45 crc kubenswrapper[4884]: E1128 17:44:45.694395 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb\": container with ID starting with aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb not found: ID does not exist" containerID="aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.694425 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb"} err="failed to get container status \"aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb\": rpc error: code = NotFound desc = could not find container \"aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb\": container with ID starting with aaae41db65d61cb9f897e8963c560470d421532cf346c200dad71e51459e00fb not found: ID does not exist" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.694445 4884 scope.go:117] "RemoveContainer" 
containerID="adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310" Nov 28 17:44:45 crc kubenswrapper[4884]: E1128 17:44:45.694814 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310\": container with ID starting with adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310 not found: ID does not exist" containerID="adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.694847 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310"} err="failed to get container status \"adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310\": rpc error: code = NotFound desc = could not find container \"adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310\": container with ID starting with adc815443b3a8e4a027cadda660c67829a8829bf6c73dadbab187cf6ab6b6310 not found: ID does not exist" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.694863 4884 scope.go:117] "RemoveContainer" containerID="5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce" Nov 28 17:44:45 crc kubenswrapper[4884]: E1128 17:44:45.695439 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce\": container with ID starting with 5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce not found: ID does not exist" containerID="5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.695464 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce"} err="failed to get container status \"5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce\": rpc error: code = NotFound desc = could not find container \"5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce\": container with ID starting with 5ff3ec83c89845d7e69b77bb29e2f78fb29a310cd6577f124a4710168105e4ce not found: ID does not exist" Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.865876 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q82p5"] Nov 28 17:44:45 crc kubenswrapper[4884]: I1128 17:44:45.883747 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q82p5"] Nov 28 17:44:46 crc kubenswrapper[4884]: I1128 17:44:46.702236 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" path="/var/lib/kubelet/pods/a0cd71d0-76ac-4f4e-8e35-f1624368814e/volumes" Nov 28 17:44:58 crc kubenswrapper[4884]: I1128 17:44:58.688859 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:44:58 crc kubenswrapper[4884]: E1128 17:44:58.690023 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.155280 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx"] Nov 28 17:45:00 crc kubenswrapper[4884]: E1128 17:45:00.156170 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="extract-utilities" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.156188 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="extract-utilities" Nov 28 17:45:00 crc kubenswrapper[4884]: E1128 17:45:00.156210 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="registry-server" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.156218 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="registry-server" Nov 28 17:45:00 crc kubenswrapper[4884]: E1128 17:45:00.156261 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="extract-content" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.156271 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="extract-content" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.156587 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0cd71d0-76ac-4f4e-8e35-f1624368814e" containerName="registry-server" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.157490 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.159497 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.159799 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.166268 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx"] Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.192445 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a521d6c-0a3f-4ae6-9897-f08069673c5d-secret-volume\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.192543 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a521d6c-0a3f-4ae6-9897-f08069673c5d-config-volume\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.192905 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5kjr\" (UniqueName: \"kubernetes.io/projected/4a521d6c-0a3f-4ae6-9897-f08069673c5d-kube-api-access-d5kjr\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.295134 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a521d6c-0a3f-4ae6-9897-f08069673c5d-secret-volume\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.295209 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a521d6c-0a3f-4ae6-9897-f08069673c5d-config-volume\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.295286 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5kjr\" (UniqueName: \"kubernetes.io/projected/4a521d6c-0a3f-4ae6-9897-f08069673c5d-kube-api-access-d5kjr\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.296409 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a521d6c-0a3f-4ae6-9897-f08069673c5d-config-volume\") pod 
\"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.305394 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a521d6c-0a3f-4ae6-9897-f08069673c5d-secret-volume\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.314524 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5kjr\" (UniqueName: \"kubernetes.io/projected/4a521d6c-0a3f-4ae6-9897-f08069673c5d-kube-api-access-d5kjr\") pod \"collect-profiles-29405865-9vtcx\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.484317 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:00 crc kubenswrapper[4884]: I1128 17:45:00.958465 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx"] Nov 28 17:45:01 crc kubenswrapper[4884]: I1128 17:45:01.689272 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a521d6c-0a3f-4ae6-9897-f08069673c5d" containerID="5e3e342355cec062180319eac48c672e020d5c8e928173f6b83a65e54e5bb77a" exitCode=0 Nov 28 17:45:01 crc kubenswrapper[4884]: I1128 17:45:01.689323 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" event={"ID":"4a521d6c-0a3f-4ae6-9897-f08069673c5d","Type":"ContainerDied","Data":"5e3e342355cec062180319eac48c672e020d5c8e928173f6b83a65e54e5bb77a"} Nov 28 17:45:01 crc kubenswrapper[4884]: I1128 17:45:01.689510 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" event={"ID":"4a521d6c-0a3f-4ae6-9897-f08069673c5d","Type":"ContainerStarted","Data":"a216382b9ba6d36a17f4497e4faac14007c113b80757d6dfae6720a326dd82fb"} Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.281298 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.366345 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a521d6c-0a3f-4ae6-9897-f08069673c5d-config-volume\") pod \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.366439 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a521d6c-0a3f-4ae6-9897-f08069673c5d-secret-volume\") pod \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.366516 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5kjr\" (UniqueName: \"kubernetes.io/projected/4a521d6c-0a3f-4ae6-9897-f08069673c5d-kube-api-access-d5kjr\") pod \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\" (UID: \"4a521d6c-0a3f-4ae6-9897-f08069673c5d\") " Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.367260 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a521d6c-0a3f-4ae6-9897-f08069673c5d-config-volume" (OuterVolumeSpecName: "config-volume") pod "4a521d6c-0a3f-4ae6-9897-f08069673c5d" (UID: "4a521d6c-0a3f-4ae6-9897-f08069673c5d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.372985 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a521d6c-0a3f-4ae6-9897-f08069673c5d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4a521d6c-0a3f-4ae6-9897-f08069673c5d" (UID: "4a521d6c-0a3f-4ae6-9897-f08069673c5d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.373199 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a521d6c-0a3f-4ae6-9897-f08069673c5d-kube-api-access-d5kjr" (OuterVolumeSpecName: "kube-api-access-d5kjr") pod "4a521d6c-0a3f-4ae6-9897-f08069673c5d" (UID: "4a521d6c-0a3f-4ae6-9897-f08069673c5d"). InnerVolumeSpecName "kube-api-access-d5kjr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.469004 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5kjr\" (UniqueName: \"kubernetes.io/projected/4a521d6c-0a3f-4ae6-9897-f08069673c5d-kube-api-access-d5kjr\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.469034 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4a521d6c-0a3f-4ae6-9897-f08069673c5d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.469044 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4a521d6c-0a3f-4ae6-9897-f08069673c5d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.710925 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" event={"ID":"4a521d6c-0a3f-4ae6-9897-f08069673c5d","Type":"ContainerDied","Data":"a216382b9ba6d36a17f4497e4faac14007c113b80757d6dfae6720a326dd82fb"} Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.711606 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a216382b9ba6d36a17f4497e4faac14007c113b80757d6dfae6720a326dd82fb" Nov 28 17:45:03 crc kubenswrapper[4884]: I1128 17:45:03.710997 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405865-9vtcx" Nov 28 17:45:04 crc kubenswrapper[4884]: I1128 17:45:04.376009 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9"] Nov 28 17:45:04 crc kubenswrapper[4884]: I1128 17:45:04.389542 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405820-pt8g9"] Nov 28 17:45:04 crc kubenswrapper[4884]: I1128 17:45:04.703133 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa3fe1bb-473c-4cd0-9e1b-f684b7926b17" path="/var/lib/kubelet/pods/fa3fe1bb-473c-4cd0-9e1b-f684b7926b17/volumes" Nov 28 17:45:13 crc kubenswrapper[4884]: I1128 17:45:13.688286 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:45:13 crc kubenswrapper[4884]: E1128 17:45:13.689248 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:45:26 crc kubenswrapper[4884]: I1128 17:45:26.688522 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:45:26 crc kubenswrapper[4884]: E1128 17:45:26.689609 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:45:37 crc kubenswrapper[4884]: I1128 17:45:37.995795 4884 scope.go:117] "RemoveContainer" containerID="f2d9cf2a2ef20bdd0713da8fdf5572fe8ee35bd323fd16d8dfb39b5446ecba80" Nov 28 17:45:38 crc kubenswrapper[4884]: I1128 17:45:38.689869 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:45:38 crc kubenswrapper[4884]: E1128 17:45:38.690777 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:45:51 crc kubenswrapper[4884]: I1128 17:45:51.689403 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:45:51 crc kubenswrapper[4884]: E1128 17:45:51.690314 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:46:06 crc kubenswrapper[4884]: I1128 17:46:06.689126 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:46:06 crc kubenswrapper[4884]: E1128 17:46:06.690049 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:46:21 crc kubenswrapper[4884]: I1128 17:46:21.688149 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:46:21 crc kubenswrapper[4884]: E1128 17:46:21.689142 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:46:23 crc kubenswrapper[4884]: I1128 17:46:23.616165 4884 generic.go:334] "Generic (PLEG): container finished" podID="1c9d690e-b787-4b1a-a760-83f68d6a69a0" containerID="15004727253bab5535b8c407b98087d67f5dbc29d89947c20ae90829206b15e1" exitCode=0 Nov 28 17:46:23 crc kubenswrapper[4884]: I1128 17:46:23.616262 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" 
event={"ID":"1c9d690e-b787-4b1a-a760-83f68d6a69a0","Type":"ContainerDied","Data":"15004727253bab5535b8c407b98087d67f5dbc29d89947c20ae90829206b15e1"} Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.158658 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.241190 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-inventory\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.241977 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ssh-key\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.242251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjp4k\" (UniqueName: \"kubernetes.io/projected/1c9d690e-b787-4b1a-a760-83f68d6a69a0-kube-api-access-tjp4k\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.242310 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceph\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.242365 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-1\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.242410 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-telemetry-combined-ca-bundle\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.242440 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-0\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.242457 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-2\") pod \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\" (UID: \"1c9d690e-b787-4b1a-a760-83f68d6a69a0\") " Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.252813 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") 
pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.252849 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c9d690e-b787-4b1a-a760-83f68d6a69a0-kube-api-access-tjp4k" (OuterVolumeSpecName: "kube-api-access-tjp4k") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "kube-api-access-tjp4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.255513 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceph" (OuterVolumeSpecName: "ceph") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.270291 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.322702 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.322719 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-inventory" (OuterVolumeSpecName: "inventory") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.324254 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.328256 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "1c9d690e-b787-4b1a-a760-83f68d6a69a0" (UID: "1c9d690e-b787-4b1a-a760-83f68d6a69a0"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347520 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjp4k\" (UniqueName: \"kubernetes.io/projected/1c9d690e-b787-4b1a-a760-83f68d6a69a0-kube-api-access-tjp4k\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347562 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347573 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347583 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347593 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347602 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347611 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.347624 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c9d690e-b787-4b1a-a760-83f68d6a69a0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.637397 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" event={"ID":"1c9d690e-b787-4b1a-a760-83f68d6a69a0","Type":"ContainerDied","Data":"54534c72a30a734b8d929649671d94792e62502b1657cdbd8b56ee213949af3e"} Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.637904 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54534c72a30a734b8d929649671d94792e62502b1657cdbd8b56ee213949af3e" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.637432 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-4tvmx" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.749155 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-xgw8z"] Nov 28 17:46:25 crc kubenswrapper[4884]: E1128 17:46:25.749715 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a521d6c-0a3f-4ae6-9897-f08069673c5d" containerName="collect-profiles" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.749738 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a521d6c-0a3f-4ae6-9897-f08069673c5d" containerName="collect-profiles" Nov 28 17:46:25 crc kubenswrapper[4884]: E1128 17:46:25.749771 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9d690e-b787-4b1a-a760-83f68d6a69a0" containerName="telemetry-openstack-openstack-cell1" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.749780 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9d690e-b787-4b1a-a760-83f68d6a69a0" containerName="telemetry-openstack-openstack-cell1" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.750049 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a521d6c-0a3f-4ae6-9897-f08069673c5d" containerName="collect-profiles" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.750128 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c9d690e-b787-4b1a-a760-83f68d6a69a0" containerName="telemetry-openstack-openstack-cell1" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.751033 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.757795 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-xgw8z"] Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.758475 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.758755 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.758867 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.758974 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.759082 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.864616 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.864762 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: 
\"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.864801 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.864913 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.864997 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.865053 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spq85\" (UniqueName: \"kubernetes.io/projected/c214f7b4-074f-4801-b6b6-375669694260-kube-api-access-spq85\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.967055 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.967175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.967226 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.967308 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") 
" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.967410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.967475 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spq85\" (UniqueName: \"kubernetes.io/projected/c214f7b4-074f-4801-b6b6-375669694260-kube-api-access-spq85\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.972577 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.972847 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.972986 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.973236 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.973886 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:25 crc kubenswrapper[4884]: I1128 17:46:25.986794 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spq85\" (UniqueName: \"kubernetes.io/projected/c214f7b4-074f-4801-b6b6-375669694260-kube-api-access-spq85\") pod \"neutron-sriov-openstack-openstack-cell1-xgw8z\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:26 crc kubenswrapper[4884]: I1128 17:46:26.105391 4884 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:46:26 crc kubenswrapper[4884]: I1128 17:46:26.678970 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-xgw8z"] Nov 28 17:46:27 crc kubenswrapper[4884]: I1128 17:46:27.657416 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" event={"ID":"c214f7b4-074f-4801-b6b6-375669694260","Type":"ContainerStarted","Data":"e8b09e888218fb287b8c002b21115977b3f93151ecd4ae95681da9db059b89cc"} Nov 28 17:46:28 crc kubenswrapper[4884]: I1128 17:46:28.668062 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" event={"ID":"c214f7b4-074f-4801-b6b6-375669694260","Type":"ContainerStarted","Data":"11cd60dfc6c550aa4573e65abde50cace7a1f9370001e176efea1dae01f47747"} Nov 28 17:46:28 crc kubenswrapper[4884]: I1128 17:46:28.686703 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" podStartSLOduration=2.957186319 podStartE2EDuration="3.686686201s" podCreationTimestamp="2025-11-28 17:46:25 +0000 UTC" firstStartedPulling="2025-11-28 17:46:26.686524695 +0000 UTC m=+8826.249308496" lastFinishedPulling="2025-11-28 17:46:27.416024587 +0000 UTC m=+8826.978808378" observedRunningTime="2025-11-28 17:46:28.685626635 +0000 UTC m=+8828.248410446" watchObservedRunningTime="2025-11-28 17:46:28.686686201 +0000 UTC m=+8828.249469992" Nov 28 17:46:32 crc kubenswrapper[4884]: I1128 17:46:32.689227 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:46:32 crc kubenswrapper[4884]: E1128 17:46:32.689976 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:46:45 crc kubenswrapper[4884]: I1128 17:46:45.689292 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:46:45 crc kubenswrapper[4884]: E1128 17:46:45.690592 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:46:50 crc kubenswrapper[4884]: I1128 17:46:50.802677 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-99sln"] Nov 28 17:46:50 crc kubenswrapper[4884]: I1128 17:46:50.807965 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:50 crc kubenswrapper[4884]: I1128 17:46:50.822628 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-99sln"] Nov 28 17:46:50 crc kubenswrapper[4884]: I1128 17:46:50.972430 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-utilities\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:50 crc kubenswrapper[4884]: I1128 17:46:50.972498 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-catalog-content\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:50 crc kubenswrapper[4884]: I1128 17:46:50.972541 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdmt2\" (UniqueName: \"kubernetes.io/projected/54c95aac-d39f-47ef-aa15-f04c61ba84c6-kube-api-access-bdmt2\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.075039 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-utilities\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.075104 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-catalog-content\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.075128 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdmt2\" (UniqueName: \"kubernetes.io/projected/54c95aac-d39f-47ef-aa15-f04c61ba84c6-kube-api-access-bdmt2\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.075578 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-utilities\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.075607 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-catalog-content\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.096427 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bdmt2\" (UniqueName: \"kubernetes.io/projected/54c95aac-d39f-47ef-aa15-f04c61ba84c6-kube-api-access-bdmt2\") pod \"redhat-marketplace-99sln\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.149197 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.679885 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-99sln"] Nov 28 17:46:51 crc kubenswrapper[4884]: I1128 17:46:51.921189 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerStarted","Data":"9ac583875f9c7afeeb4609d98744a00336c26b5d0fc57e65fe6a9fbd50087ca8"} Nov 28 17:46:52 crc kubenswrapper[4884]: I1128 17:46:52.932924 4884 generic.go:334] "Generic (PLEG): container finished" podID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerID="2d6fefa8958d9a547e5e4eae4707ac6667b17a9a49c9d4775f082117e351090c" exitCode=0 Nov 28 17:46:52 crc kubenswrapper[4884]: I1128 17:46:52.933013 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerDied","Data":"2d6fefa8958d9a547e5e4eae4707ac6667b17a9a49c9d4775f082117e351090c"} Nov 28 17:46:53 crc kubenswrapper[4884]: I1128 17:46:53.948648 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerStarted","Data":"fb73ab54cfc732baec6107b8acbb9251fe7aa4b8b1062fdd6bbdd1703d508a26"} Nov 28 17:46:54 crc kubenswrapper[4884]: I1128 17:46:54.960590 4884 generic.go:334] "Generic (PLEG): container finished" podID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerID="fb73ab54cfc732baec6107b8acbb9251fe7aa4b8b1062fdd6bbdd1703d508a26" exitCode=0 Nov 28 17:46:54 crc kubenswrapper[4884]: I1128 17:46:54.960709 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerDied","Data":"fb73ab54cfc732baec6107b8acbb9251fe7aa4b8b1062fdd6bbdd1703d508a26"} Nov 28 17:46:55 crc kubenswrapper[4884]: I1128 17:46:55.975080 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerStarted","Data":"592cb726d126c7ffb111ecab277ed4ef3dd58dd13a23064ba5e2d8426e6ae74b"} Nov 28 17:46:56 crc kubenswrapper[4884]: I1128 17:46:56.001878 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-99sln" podStartSLOduration=3.512685377 podStartE2EDuration="6.001857756s" podCreationTimestamp="2025-11-28 17:46:50 +0000 UTC" firstStartedPulling="2025-11-28 17:46:52.935214844 +0000 UTC m=+8852.497998645" lastFinishedPulling="2025-11-28 17:46:55.424387223 +0000 UTC m=+8854.987171024" observedRunningTime="2025-11-28 17:46:55.994880955 +0000 UTC m=+8855.557664766" watchObservedRunningTime="2025-11-28 17:46:56.001857756 +0000 UTC m=+8855.564641557" Nov 28 17:46:58 crc kubenswrapper[4884]: I1128 17:46:58.688490 4884 scope.go:117] "RemoveContainer" 
containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:46:58 crc kubenswrapper[4884]: E1128 17:46:58.690292 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:47:01 crc kubenswrapper[4884]: I1128 17:47:01.149401 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:47:01 crc kubenswrapper[4884]: I1128 17:47:01.149730 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:47:01 crc kubenswrapper[4884]: I1128 17:47:01.205930 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:47:02 crc kubenswrapper[4884]: I1128 17:47:02.110350 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:47:02 crc kubenswrapper[4884]: I1128 17:47:02.170151 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-99sln"] Nov 28 17:47:04 crc kubenswrapper[4884]: I1128 17:47:04.050309 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-99sln" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="registry-server" containerID="cri-o://592cb726d126c7ffb111ecab277ed4ef3dd58dd13a23064ba5e2d8426e6ae74b" gracePeriod=2 Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.064716 4884 generic.go:334] "Generic (PLEG): container finished" podID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerID="592cb726d126c7ffb111ecab277ed4ef3dd58dd13a23064ba5e2d8426e6ae74b" exitCode=0 Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.065826 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerDied","Data":"592cb726d126c7ffb111ecab277ed4ef3dd58dd13a23064ba5e2d8426e6ae74b"} Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.065971 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-99sln" event={"ID":"54c95aac-d39f-47ef-aa15-f04c61ba84c6","Type":"ContainerDied","Data":"9ac583875f9c7afeeb4609d98744a00336c26b5d0fc57e65fe6a9fbd50087ca8"} Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.066056 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ac583875f9c7afeeb4609d98744a00336c26b5d0fc57e65fe6a9fbd50087ca8" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.149620 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.290757 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-catalog-content\") pod \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.291200 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-utilities\") pod \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.291395 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdmt2\" (UniqueName: \"kubernetes.io/projected/54c95aac-d39f-47ef-aa15-f04c61ba84c6-kube-api-access-bdmt2\") pod \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\" (UID: \"54c95aac-d39f-47ef-aa15-f04c61ba84c6\") " Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.293401 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-utilities" (OuterVolumeSpecName: "utilities") pod "54c95aac-d39f-47ef-aa15-f04c61ba84c6" (UID: "54c95aac-d39f-47ef-aa15-f04c61ba84c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.297401 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54c95aac-d39f-47ef-aa15-f04c61ba84c6-kube-api-access-bdmt2" (OuterVolumeSpecName: "kube-api-access-bdmt2") pod "54c95aac-d39f-47ef-aa15-f04c61ba84c6" (UID: "54c95aac-d39f-47ef-aa15-f04c61ba84c6"). InnerVolumeSpecName "kube-api-access-bdmt2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.319191 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "54c95aac-d39f-47ef-aa15-f04c61ba84c6" (UID: "54c95aac-d39f-47ef-aa15-f04c61ba84c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.395066 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdmt2\" (UniqueName: \"kubernetes.io/projected/54c95aac-d39f-47ef-aa15-f04c61ba84c6-kube-api-access-bdmt2\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.395121 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:05 crc kubenswrapper[4884]: I1128 17:47:05.395161 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54c95aac-d39f-47ef-aa15-f04c61ba84c6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:47:06 crc kubenswrapper[4884]: I1128 17:47:06.074838 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-99sln" Nov 28 17:47:06 crc kubenswrapper[4884]: I1128 17:47:06.124676 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-99sln"] Nov 28 17:47:06 crc kubenswrapper[4884]: I1128 17:47:06.138848 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-99sln"] Nov 28 17:47:06 crc kubenswrapper[4884]: I1128 17:47:06.706047 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" path="/var/lib/kubelet/pods/54c95aac-d39f-47ef-aa15-f04c61ba84c6/volumes" Nov 28 17:47:10 crc kubenswrapper[4884]: I1128 17:47:10.695548 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:47:10 crc kubenswrapper[4884]: E1128 17:47:10.696470 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:47:21 crc kubenswrapper[4884]: I1128 17:47:21.688732 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:47:22 crc kubenswrapper[4884]: I1128 17:47:22.273213 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"8da67facbdf6017db45997b6d5923e14c66759a6fb483d179955b6b949472e22"} Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.186575 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fq8p8"] Nov 28 17:48:45 crc kubenswrapper[4884]: E1128 17:48:45.187838 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="registry-server" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.187856 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="registry-server" Nov 28 17:48:45 crc kubenswrapper[4884]: E1128 17:48:45.187876 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="extract-content" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.187883 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="extract-content" Nov 28 17:48:45 crc kubenswrapper[4884]: E1128 17:48:45.187929 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="extract-utilities" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.187937 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="extract-utilities" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.188192 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="54c95aac-d39f-47ef-aa15-f04c61ba84c6" containerName="registry-server" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.191565 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.215569 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fq8p8"] Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.254622 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-catalog-content\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.254693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-utilities\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.254883 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2pqv\" (UniqueName: \"kubernetes.io/projected/f690c0a2-ff48-4859-978f-b6e65ad65592-kube-api-access-d2pqv\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.356294 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2pqv\" (UniqueName: \"kubernetes.io/projected/f690c0a2-ff48-4859-978f-b6e65ad65592-kube-api-access-d2pqv\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.356442 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-catalog-content\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.356491 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-utilities\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.357119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-catalog-content\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.357250 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-utilities\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.374832 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d2pqv\" (UniqueName: \"kubernetes.io/projected/f690c0a2-ff48-4859-978f-b6e65ad65592-kube-api-access-d2pqv\") pod \"certified-operators-fq8p8\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:45 crc kubenswrapper[4884]: I1128 17:48:45.527155 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:46 crc kubenswrapper[4884]: I1128 17:48:46.118941 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fq8p8"] Nov 28 17:48:46 crc kubenswrapper[4884]: I1128 17:48:46.274194 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerStarted","Data":"e3cf091a1c02309fe648c940e6e557ae9bf946df9064c23d8f6e55dcc794efcb"} Nov 28 17:48:47 crc kubenswrapper[4884]: I1128 17:48:47.288281 4884 generic.go:334] "Generic (PLEG): container finished" podID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerID="a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451" exitCode=0 Nov 28 17:48:47 crc kubenswrapper[4884]: I1128 17:48:47.288335 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerDied","Data":"a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451"} Nov 28 17:48:47 crc kubenswrapper[4884]: I1128 17:48:47.291916 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:48:49 crc kubenswrapper[4884]: I1128 17:48:49.315137 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerStarted","Data":"f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848"} Nov 28 17:48:50 crc kubenswrapper[4884]: I1128 17:48:50.329374 4884 generic.go:334] "Generic (PLEG): container finished" podID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerID="f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848" exitCode=0 Nov 28 17:48:50 crc kubenswrapper[4884]: I1128 17:48:50.329434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerDied","Data":"f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848"} Nov 28 17:48:51 crc kubenswrapper[4884]: I1128 17:48:51.342624 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerStarted","Data":"10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e"} Nov 28 17:48:51 crc kubenswrapper[4884]: I1128 17:48:51.366856 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fq8p8" podStartSLOduration=2.842977873 podStartE2EDuration="6.366839707s" podCreationTimestamp="2025-11-28 17:48:45 +0000 UTC" firstStartedPulling="2025-11-28 17:48:47.29163731 +0000 UTC m=+8966.854421111" lastFinishedPulling="2025-11-28 17:48:50.815499144 +0000 UTC m=+8970.378282945" observedRunningTime="2025-11-28 17:48:51.361753223 +0000 UTC m=+8970.924537034" watchObservedRunningTime="2025-11-28 
17:48:51.366839707 +0000 UTC m=+8970.929623508" Nov 28 17:48:55 crc kubenswrapper[4884]: I1128 17:48:55.528115 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:55 crc kubenswrapper[4884]: I1128 17:48:55.529190 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:55 crc kubenswrapper[4884]: I1128 17:48:55.584370 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:56 crc kubenswrapper[4884]: I1128 17:48:56.464121 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:58 crc kubenswrapper[4884]: I1128 17:48:58.548582 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fq8p8"] Nov 28 17:48:58 crc kubenswrapper[4884]: I1128 17:48:58.548821 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fq8p8" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="registry-server" containerID="cri-o://10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e" gracePeriod=2 Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.083332 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.185008 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-catalog-content\") pod \"f690c0a2-ff48-4859-978f-b6e65ad65592\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.185604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2pqv\" (UniqueName: \"kubernetes.io/projected/f690c0a2-ff48-4859-978f-b6e65ad65592-kube-api-access-d2pqv\") pod \"f690c0a2-ff48-4859-978f-b6e65ad65592\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.185764 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-utilities\") pod \"f690c0a2-ff48-4859-978f-b6e65ad65592\" (UID: \"f690c0a2-ff48-4859-978f-b6e65ad65592\") " Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.186879 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-utilities" (OuterVolumeSpecName: "utilities") pod "f690c0a2-ff48-4859-978f-b6e65ad65592" (UID: "f690c0a2-ff48-4859-978f-b6e65ad65592"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.194287 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f690c0a2-ff48-4859-978f-b6e65ad65592-kube-api-access-d2pqv" (OuterVolumeSpecName: "kube-api-access-d2pqv") pod "f690c0a2-ff48-4859-978f-b6e65ad65592" (UID: "f690c0a2-ff48-4859-978f-b6e65ad65592"). InnerVolumeSpecName "kube-api-access-d2pqv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.290935 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.291235 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2pqv\" (UniqueName: \"kubernetes.io/projected/f690c0a2-ff48-4859-978f-b6e65ad65592-kube-api-access-d2pqv\") on node \"crc\" DevicePath \"\"" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.326414 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f690c0a2-ff48-4859-978f-b6e65ad65592" (UID: "f690c0a2-ff48-4859-978f-b6e65ad65592"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.395302 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f690c0a2-ff48-4859-978f-b6e65ad65592-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.428071 4884 generic.go:334] "Generic (PLEG): container finished" podID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerID="10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e" exitCode=0 Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.428154 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fq8p8" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.428393 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerDied","Data":"10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e"} Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.428492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fq8p8" event={"ID":"f690c0a2-ff48-4859-978f-b6e65ad65592","Type":"ContainerDied","Data":"e3cf091a1c02309fe648c940e6e557ae9bf946df9064c23d8f6e55dcc794efcb"} Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.428581 4884 scope.go:117] "RemoveContainer" containerID="10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.466304 4884 scope.go:117] "RemoveContainer" containerID="f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.477641 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fq8p8"] Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.490263 4884 scope.go:117] "RemoveContainer" containerID="a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.499435 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fq8p8"] Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.554857 4884 scope.go:117] "RemoveContainer" containerID="10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e" Nov 28 17:48:59 crc kubenswrapper[4884]: E1128 17:48:59.555355 4884 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e\": container with ID starting with 10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e not found: ID does not exist" containerID="10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.555394 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e"} err="failed to get container status \"10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e\": rpc error: code = NotFound desc = could not find container \"10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e\": container with ID starting with 10e913052dffbbcdf202e2a5ab6c8bbddbbc77999b9fc67e40fd391cd7a4942e not found: ID does not exist" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.555423 4884 scope.go:117] "RemoveContainer" containerID="f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848" Nov 28 17:48:59 crc kubenswrapper[4884]: E1128 17:48:59.555797 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848\": container with ID starting with f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848 not found: ID does not exist" containerID="f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.555840 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848"} err="failed to get container status \"f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848\": rpc error: code = NotFound desc = could not find container \"f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848\": container with ID starting with f9486fe01d8644c0e21e6998bfd4702d38da8c05d2019ea501b797afdfd27848 not found: ID does not exist" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.555865 4884 scope.go:117] "RemoveContainer" containerID="a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451" Nov 28 17:48:59 crc kubenswrapper[4884]: E1128 17:48:59.556219 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451\": container with ID starting with a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451 not found: ID does not exist" containerID="a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451" Nov 28 17:48:59 crc kubenswrapper[4884]: I1128 17:48:59.556239 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451"} err="failed to get container status \"a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451\": rpc error: code = NotFound desc = could not find container \"a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451\": container with ID starting with a459de855cb7bfea5fb6f75da56f9e3ad896da4a87744366464aea454d6d7451 not found: ID does not exist" Nov 28 17:49:00 crc kubenswrapper[4884]: I1128 17:49:00.709169 4884 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" path="/var/lib/kubelet/pods/f690c0a2-ff48-4859-978f-b6e65ad65592/volumes" Nov 28 17:49:21 crc kubenswrapper[4884]: I1128 17:49:21.242966 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:49:21 crc kubenswrapper[4884]: I1128 17:49:21.243650 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:49:51 crc kubenswrapper[4884]: I1128 17:49:51.243565 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:49:51 crc kubenswrapper[4884]: I1128 17:49:51.244003 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:50:21 crc kubenswrapper[4884]: I1128 17:50:21.242700 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:50:21 crc kubenswrapper[4884]: I1128 17:50:21.243374 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:50:21 crc kubenswrapper[4884]: I1128 17:50:21.243431 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:50:21 crc kubenswrapper[4884]: I1128 17:50:21.244427 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8da67facbdf6017db45997b6d5923e14c66759a6fb483d179955b6b949472e22"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:50:21 crc kubenswrapper[4884]: I1128 17:50:21.244492 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://8da67facbdf6017db45997b6d5923e14c66759a6fb483d179955b6b949472e22" gracePeriod=600 Nov 28 17:50:22 crc kubenswrapper[4884]: I1128 17:50:22.355176 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="8da67facbdf6017db45997b6d5923e14c66759a6fb483d179955b6b949472e22" exitCode=0 Nov 28 17:50:22 crc kubenswrapper[4884]: I1128 17:50:22.355261 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"8da67facbdf6017db45997b6d5923e14c66759a6fb483d179955b6b949472e22"} Nov 28 17:50:22 crc kubenswrapper[4884]: I1128 17:50:22.355583 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb"} Nov 28 17:50:22 crc kubenswrapper[4884]: I1128 17:50:22.355605 4884 scope.go:117] "RemoveContainer" containerID="80e0ed8a0f5cc904956d749b98a5c384242be4da4f552b4d4757e47ed92c9c42" Nov 28 17:52:51 crc kubenswrapper[4884]: I1128 17:52:51.243265 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:52:51 crc kubenswrapper[4884]: I1128 17:52:51.243994 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:53:21 crc kubenswrapper[4884]: I1128 17:53:21.243026 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:53:21 crc kubenswrapper[4884]: I1128 17:53:21.243715 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:53:38 crc kubenswrapper[4884]: I1128 17:53:38.248929 4884 scope.go:117] "RemoveContainer" containerID="592cb726d126c7ffb111ecab277ed4ef3dd58dd13a23064ba5e2d8426e6ae74b" Nov 28 17:53:38 crc kubenswrapper[4884]: I1128 17:53:38.271349 4884 scope.go:117] "RemoveContainer" containerID="fb73ab54cfc732baec6107b8acbb9251fe7aa4b8b1062fdd6bbdd1703d508a26" Nov 28 17:53:38 crc kubenswrapper[4884]: I1128 17:53:38.290724 4884 scope.go:117] "RemoveContainer" containerID="2d6fefa8958d9a547e5e4eae4707ac6667b17a9a49c9d4775f082117e351090c" Nov 28 17:53:51 crc kubenswrapper[4884]: I1128 17:53:51.243117 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 17:53:51 crc kubenswrapper[4884]: I1128 17:53:51.243716 4884 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 17:53:51 crc kubenswrapper[4884]: I1128 17:53:51.243769 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 17:53:51 crc kubenswrapper[4884]: I1128 17:53:51.244654 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 17:53:51 crc kubenswrapper[4884]: I1128 17:53:51.244716 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" gracePeriod=600 Nov 28 17:53:51 crc kubenswrapper[4884]: E1128 17:53:51.368262 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:53:52 crc kubenswrapper[4884]: I1128 17:53:52.112330 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" exitCode=0 Nov 28 17:53:52 crc kubenswrapper[4884]: I1128 17:53:52.112377 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb"} Nov 28 17:53:52 crc kubenswrapper[4884]: I1128 17:53:52.112702 4884 scope.go:117] "RemoveContainer" containerID="8da67facbdf6017db45997b6d5923e14c66759a6fb483d179955b6b949472e22" Nov 28 17:53:52 crc kubenswrapper[4884]: I1128 17:53:52.113668 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:53:52 crc kubenswrapper[4884]: E1128 17:53:52.114161 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:54:06 crc kubenswrapper[4884]: I1128 17:54:06.689225 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:54:06 crc kubenswrapper[4884]: E1128 17:54:06.690131 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:54:19 crc kubenswrapper[4884]: I1128 17:54:19.690950 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:54:19 crc kubenswrapper[4884]: E1128 17:54:19.692644 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:54:30 crc kubenswrapper[4884]: I1128 17:54:30.696715 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:54:30 crc kubenswrapper[4884]: E1128 17:54:30.698646 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:54:36 crc kubenswrapper[4884]: E1128 17:54:36.243781 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/rpm-ostreed.service\": RecentStats: unable to find data in memory cache]" Nov 28 17:54:37 crc kubenswrapper[4884]: I1128 17:54:37.621885 4884 generic.go:334] "Generic (PLEG): container finished" podID="c214f7b4-074f-4801-b6b6-375669694260" containerID="11cd60dfc6c550aa4573e65abde50cace7a1f9370001e176efea1dae01f47747" exitCode=0 Nov 28 17:54:37 crc kubenswrapper[4884]: I1128 17:54:37.621992 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" event={"ID":"c214f7b4-074f-4801-b6b6-375669694260","Type":"ContainerDied","Data":"11cd60dfc6c550aa4573e65abde50cace7a1f9370001e176efea1dae01f47747"} Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.129105 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.306513 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ceph\") pod \"c214f7b4-074f-4801-b6b6-375669694260\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.306599 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spq85\" (UniqueName: \"kubernetes.io/projected/c214f7b4-074f-4801-b6b6-375669694260-kube-api-access-spq85\") pod \"c214f7b4-074f-4801-b6b6-375669694260\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.306692 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-agent-neutron-config-0\") pod \"c214f7b4-074f-4801-b6b6-375669694260\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.306712 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ssh-key\") pod \"c214f7b4-074f-4801-b6b6-375669694260\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.306805 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-inventory\") pod \"c214f7b4-074f-4801-b6b6-375669694260\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.306849 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-combined-ca-bundle\") pod \"c214f7b4-074f-4801-b6b6-375669694260\" (UID: \"c214f7b4-074f-4801-b6b6-375669694260\") " Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.313443 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ceph" (OuterVolumeSpecName: "ceph") pod "c214f7b4-074f-4801-b6b6-375669694260" (UID: "c214f7b4-074f-4801-b6b6-375669694260"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.313636 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c214f7b4-074f-4801-b6b6-375669694260-kube-api-access-spq85" (OuterVolumeSpecName: "kube-api-access-spq85") pod "c214f7b4-074f-4801-b6b6-375669694260" (UID: "c214f7b4-074f-4801-b6b6-375669694260"). InnerVolumeSpecName "kube-api-access-spq85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.316804 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "c214f7b4-074f-4801-b6b6-375669694260" (UID: "c214f7b4-074f-4801-b6b6-375669694260"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.352940 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "c214f7b4-074f-4801-b6b6-375669694260" (UID: "c214f7b4-074f-4801-b6b6-375669694260"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.353965 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c214f7b4-074f-4801-b6b6-375669694260" (UID: "c214f7b4-074f-4801-b6b6-375669694260"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.374265 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-inventory" (OuterVolumeSpecName: "inventory") pod "c214f7b4-074f-4801-b6b6-375669694260" (UID: "c214f7b4-074f-4801-b6b6-375669694260"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.409622 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.409657 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.409670 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.409680 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spq85\" (UniqueName: \"kubernetes.io/projected/c214f7b4-074f-4801-b6b6-375669694260-kube-api-access-spq85\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.409694 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.409703 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c214f7b4-074f-4801-b6b6-375669694260-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.647351 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" event={"ID":"c214f7b4-074f-4801-b6b6-375669694260","Type":"ContainerDied","Data":"e8b09e888218fb287b8c002b21115977b3f93151ecd4ae95681da9db059b89cc"} Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.647394 4884 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="e8b09e888218fb287b8c002b21115977b3f93151ecd4ae95681da9db059b89cc" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.647409 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-xgw8z" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.750301 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-42kgt"] Nov 28 17:54:39 crc kubenswrapper[4884]: E1128 17:54:39.753692 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="extract-utilities" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.753724 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="extract-utilities" Nov 28 17:54:39 crc kubenswrapper[4884]: E1128 17:54:39.753746 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="extract-content" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.753752 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="extract-content" Nov 28 17:54:39 crc kubenswrapper[4884]: E1128 17:54:39.753766 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c214f7b4-074f-4801-b6b6-375669694260" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.753772 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c214f7b4-074f-4801-b6b6-375669694260" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 17:54:39 crc kubenswrapper[4884]: E1128 17:54:39.753799 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="registry-server" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.753805 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="registry-server" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.754012 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f690c0a2-ff48-4859-978f-b6e65ad65592" containerName="registry-server" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.754039 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c214f7b4-074f-4801-b6b6-375669694260" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.754852 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.758047 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.758530 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.758856 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.760978 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.767720 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.770934 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-42kgt"] Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.919888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.920004 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.920063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.920122 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhm6f\" (UniqueName: \"kubernetes.io/projected/436513f6-b254-482c-96a2-12faf0ab7f10-kube-api-access-nhm6f\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.920159 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:39 crc kubenswrapper[4884]: I1128 17:54:39.920292 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ceph\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.022316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.022493 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.022604 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.022725 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.022997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhm6f\" (UniqueName: \"kubernetes.io/projected/436513f6-b254-482c-96a2-12faf0ab7f10-kube-api-access-nhm6f\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.023631 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.027274 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.028850 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " 
pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.029020 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.031048 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.032536 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.047391 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhm6f\" (UniqueName: \"kubernetes.io/projected/436513f6-b254-482c-96a2-12faf0ab7f10-kube-api-access-nhm6f\") pod \"neutron-dhcp-openstack-openstack-cell1-42kgt\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.080652 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.659621 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-42kgt"] Nov 28 17:54:40 crc kubenswrapper[4884]: I1128 17:54:40.665770 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 17:54:41 crc kubenswrapper[4884]: I1128 17:54:41.671762 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" event={"ID":"436513f6-b254-482c-96a2-12faf0ab7f10","Type":"ContainerStarted","Data":"1248db5d29825d1a357d656e4ffed69408ecba84fb1bf8e64d1f93ef468f8424"} Nov 28 17:54:41 crc kubenswrapper[4884]: I1128 17:54:41.672185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" event={"ID":"436513f6-b254-482c-96a2-12faf0ab7f10","Type":"ContainerStarted","Data":"a5fed306fd82f6cfa570c7aa08f2bec6633d71176e5c6431df9defbce5fe7948"} Nov 28 17:54:41 crc kubenswrapper[4884]: I1128 17:54:41.696262 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" podStartSLOduration=2.156807634 podStartE2EDuration="2.696240287s" podCreationTimestamp="2025-11-28 17:54:39 +0000 UTC" firstStartedPulling="2025-11-28 17:54:40.665462098 +0000 UTC m=+9320.228245909" lastFinishedPulling="2025-11-28 17:54:41.204894761 +0000 UTC m=+9320.767678562" observedRunningTime="2025-11-28 17:54:41.688337812 +0000 UTC m=+9321.251121613" watchObservedRunningTime="2025-11-28 17:54:41.696240287 +0000 UTC m=+9321.259024088" Nov 28 17:54:44 crc kubenswrapper[4884]: I1128 17:54:44.689135 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:54:44 crc kubenswrapper[4884]: E1128 17:54:44.689911 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.543621 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5n6ql"] Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.554525 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.572599 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5n6ql"] Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.670148 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-catalog-content\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.670456 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-utilities\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.670639 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntjqw\" (UniqueName: \"kubernetes.io/projected/c43a9794-610b-4497-af67-c3bdfdb7fac6-kube-api-access-ntjqw\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.688886 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:54:57 crc kubenswrapper[4884]: E1128 17:54:57.689230 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.773470 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-catalog-content\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.773541 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-utilities\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.773699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntjqw\" (UniqueName: \"kubernetes.io/projected/c43a9794-610b-4497-af67-c3bdfdb7fac6-kube-api-access-ntjqw\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.774047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-utilities\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.774047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-catalog-content\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.798332 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntjqw\" (UniqueName: \"kubernetes.io/projected/c43a9794-610b-4497-af67-c3bdfdb7fac6-kube-api-access-ntjqw\") pod \"community-operators-5n6ql\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:57 crc kubenswrapper[4884]: I1128 17:54:57.882017 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:54:58 crc kubenswrapper[4884]: W1128 17:54:58.500387 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc43a9794_610b_4497_af67_c3bdfdb7fac6.slice/crio-2b0497e4799b6b3380b297b1cdd804dca27a208456a35827327436466bf7ee74 WatchSource:0}: Error finding container 2b0497e4799b6b3380b297b1cdd804dca27a208456a35827327436466bf7ee74: Status 404 returned error can't find the container with id 2b0497e4799b6b3380b297b1cdd804dca27a208456a35827327436466bf7ee74 Nov 28 17:54:58 crc kubenswrapper[4884]: I1128 17:54:58.505074 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5n6ql"] Nov 28 17:54:58 crc kubenswrapper[4884]: I1128 17:54:58.848018 4884 generic.go:334] "Generic (PLEG): container finished" podID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerID="2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf" exitCode=0 Nov 28 17:54:58 crc kubenswrapper[4884]: I1128 17:54:58.848588 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerDied","Data":"2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf"} Nov 28 17:54:58 crc kubenswrapper[4884]: I1128 17:54:58.849026 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerStarted","Data":"2b0497e4799b6b3380b297b1cdd804dca27a208456a35827327436466bf7ee74"} Nov 28 17:54:59 crc kubenswrapper[4884]: I1128 17:54:59.861378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerStarted","Data":"d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9"} Nov 28 17:55:00 crc kubenswrapper[4884]: I1128 17:55:00.872361 4884 generic.go:334] "Generic (PLEG): container finished" podID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerID="d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9" exitCode=0 Nov 28 17:55:00 crc kubenswrapper[4884]: I1128 17:55:00.872732 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerDied","Data":"d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9"} Nov 28 17:55:01 crc kubenswrapper[4884]: I1128 17:55:01.898835 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerStarted","Data":"08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea"} Nov 28 17:55:01 crc kubenswrapper[4884]: I1128 17:55:01.923385 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5n6ql" podStartSLOduration=2.47342345 podStartE2EDuration="4.923361579s" podCreationTimestamp="2025-11-28 17:54:57 +0000 UTC" firstStartedPulling="2025-11-28 17:54:58.850469492 +0000 UTC m=+9338.413253303" lastFinishedPulling="2025-11-28 17:55:01.300407621 +0000 UTC m=+9340.863191432" observedRunningTime="2025-11-28 17:55:01.916052718 +0000 UTC m=+9341.478836519" watchObservedRunningTime="2025-11-28 17:55:01.923361579 +0000 UTC m=+9341.486145380" Nov 28 17:55:07 crc kubenswrapper[4884]: I1128 17:55:07.882935 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:55:07 crc kubenswrapper[4884]: I1128 17:55:07.883551 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:55:07 crc kubenswrapper[4884]: I1128 17:55:07.932657 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:55:08 crc kubenswrapper[4884]: I1128 17:55:08.011582 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:55:08 crc kubenswrapper[4884]: I1128 17:55:08.174931 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5n6ql"] Nov 28 17:55:08 crc kubenswrapper[4884]: I1128 17:55:08.689792 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:55:08 crc kubenswrapper[4884]: E1128 17:55:08.690389 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:55:09 crc kubenswrapper[4884]: I1128 17:55:09.979945 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5n6ql" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="registry-server" containerID="cri-o://08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea" gracePeriod=2 Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.567646 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.678883 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-catalog-content\") pod \"c43a9794-610b-4497-af67-c3bdfdb7fac6\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.679272 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntjqw\" (UniqueName: \"kubernetes.io/projected/c43a9794-610b-4497-af67-c3bdfdb7fac6-kube-api-access-ntjqw\") pod \"c43a9794-610b-4497-af67-c3bdfdb7fac6\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.679357 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-utilities\") pod \"c43a9794-610b-4497-af67-c3bdfdb7fac6\" (UID: \"c43a9794-610b-4497-af67-c3bdfdb7fac6\") " Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.680356 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-utilities" (OuterVolumeSpecName: "utilities") pod "c43a9794-610b-4497-af67-c3bdfdb7fac6" (UID: "c43a9794-610b-4497-af67-c3bdfdb7fac6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.685896 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c43a9794-610b-4497-af67-c3bdfdb7fac6-kube-api-access-ntjqw" (OuterVolumeSpecName: "kube-api-access-ntjqw") pod "c43a9794-610b-4497-af67-c3bdfdb7fac6" (UID: "c43a9794-610b-4497-af67-c3bdfdb7fac6"). InnerVolumeSpecName "kube-api-access-ntjqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.741894 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c43a9794-610b-4497-af67-c3bdfdb7fac6" (UID: "c43a9794-610b-4497-af67-c3bdfdb7fac6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.781854 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.781895 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c43a9794-610b-4497-af67-c3bdfdb7fac6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.781912 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntjqw\" (UniqueName: \"kubernetes.io/projected/c43a9794-610b-4497-af67-c3bdfdb7fac6-kube-api-access-ntjqw\") on node \"crc\" DevicePath \"\"" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.994207 4884 generic.go:334] "Generic (PLEG): container finished" podID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerID="08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea" exitCode=0 Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.994471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerDied","Data":"08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea"} Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.994536 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5n6ql" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.995457 4884 scope.go:117] "RemoveContainer" containerID="08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea" Nov 28 17:55:10 crc kubenswrapper[4884]: I1128 17:55:10.995412 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5n6ql" event={"ID":"c43a9794-610b-4497-af67-c3bdfdb7fac6","Type":"ContainerDied","Data":"2b0497e4799b6b3380b297b1cdd804dca27a208456a35827327436466bf7ee74"} Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.034001 4884 scope.go:117] "RemoveContainer" containerID="d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.044063 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5n6ql"] Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.056249 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5n6ql"] Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.070949 4884 scope.go:117] "RemoveContainer" containerID="2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.156653 4884 scope.go:117] "RemoveContainer" containerID="08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea" Nov 28 17:55:11 crc kubenswrapper[4884]: E1128 17:55:11.157233 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea\": container with ID starting with 08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea not found: ID does not exist" containerID="08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.157331 
4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea"} err="failed to get container status \"08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea\": rpc error: code = NotFound desc = could not find container \"08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea\": container with ID starting with 08d0c1a63c6b67ce061d51001ea20f8c9be2161a4516ab4dbf13f851de56a3ea not found: ID does not exist" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.157363 4884 scope.go:117] "RemoveContainer" containerID="d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9" Nov 28 17:55:11 crc kubenswrapper[4884]: E1128 17:55:11.157809 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9\": container with ID starting with d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9 not found: ID does not exist" containerID="d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.157878 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9"} err="failed to get container status \"d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9\": rpc error: code = NotFound desc = could not find container \"d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9\": container with ID starting with d937122fddc8450c8e29204f72291e16efbb856d4b1f1043c1f637127a68dea9 not found: ID does not exist" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.157922 4884 scope.go:117] "RemoveContainer" containerID="2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf" Nov 28 17:55:11 crc kubenswrapper[4884]: E1128 17:55:11.158290 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf\": container with ID starting with 2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf not found: ID does not exist" containerID="2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf" Nov 28 17:55:11 crc kubenswrapper[4884]: I1128 17:55:11.158328 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf"} err="failed to get container status \"2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf\": rpc error: code = NotFound desc = could not find container \"2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf\": container with ID starting with 2b0b82bffded445e62da9b1e3218b1ac05d55893d709daf8e0d13e9d85450aaf not found: ID does not exist" Nov 28 17:55:12 crc kubenswrapper[4884]: I1128 17:55:12.703177 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" path="/var/lib/kubelet/pods/c43a9794-610b-4497-af67-c3bdfdb7fac6/volumes" Nov 28 17:55:23 crc kubenswrapper[4884]: I1128 17:55:23.689244 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:55:23 crc kubenswrapper[4884]: E1128 17:55:23.689916 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
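The "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pairs above are effectively idempotent cleanup: the containers were already pruned by the runtime, so the error is logged and the deletion is treated as done. A sketch of that handling with a sentinel standing in for the gRPC NotFound code (illustrative names, not the CRI client):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errNotFound = errors.New("code = NotFound: ID does not exist")

    func containerStatus(known map[string]bool, id string) error {
    	if !known[id] {
    		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
    	}
    	return nil
    }

    // removeContainer logs NotFound but does not retry: the desired
    // outcome (container gone) already holds, as in the entries above.
    func removeContainer(known map[string]bool, id string) {
    	if err := containerStatus(known, id); errors.Is(err, errNotFound) {
    		fmt.Printf("DeleteContainer returned error containerID=%s err=%v (already removed)\n", id[:8], err)
    		return
    	}
    	delete(known, id)
    }

    func main() {
    	removeContainer(map[string]bool{}, "08d0c1a63c6b67ce061d51001ea20f8c9be2161a")
    }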
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:55:36 crc kubenswrapper[4884]: I1128 17:55:36.688450 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:55:36 crc kubenswrapper[4884]: E1128 17:55:36.689453 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:55:48 crc kubenswrapper[4884]: I1128 17:55:48.689028 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:55:48 crc kubenswrapper[4884]: E1128 17:55:48.690249 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:56:03 crc kubenswrapper[4884]: I1128 17:56:03.689467 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:56:03 crc kubenswrapper[4884]: E1128 17:56:03.690343 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:56:15 crc kubenswrapper[4884]: I1128 17:56:15.689322 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:56:15 crc kubenswrapper[4884]: E1128 17:56:15.691679 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:56:28 crc kubenswrapper[4884]: I1128 17:56:28.689777 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:56:28 crc kubenswrapper[4884]: E1128 17:56:28.690528 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:56:42 crc kubenswrapper[4884]: I1128 17:56:42.689251 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:56:42 crc kubenswrapper[4884]: E1128 17:56:42.690139 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:56:43 crc kubenswrapper[4884]: I1128 17:56:43.757550 4884 trace.go:236] Trace[1651125820]: "Calculate volume metrics of mysql-db for pod openstack/openstack-cell1-galera-0" (28-Nov-2025 17:56:42.752) (total time: 1005ms): Nov 28 17:56:43 crc kubenswrapper[4884]: Trace[1651125820]: [1.005252588s] [1.005252588s] END Nov 28 17:56:54 crc kubenswrapper[4884]: I1128 17:56:54.689110 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:56:54 crc kubenswrapper[4884]: E1128 17:56:54.690004 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.319066 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ktmzw"] Nov 28 17:56:55 crc kubenswrapper[4884]: E1128 17:56:55.319617 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="registry-server" Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.319643 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="registry-server" Nov 28 17:56:55 crc kubenswrapper[4884]: E1128 17:56:55.319657 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="extract-utilities" Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.319666 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="extract-utilities" Nov 28 17:56:55 crc kubenswrapper[4884]: E1128 17:56:55.319673 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="extract-content" Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.319679 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="extract-content" Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.319925 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c43a9794-610b-4497-af67-c3bdfdb7fac6" containerName="registry-server" Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.321750 4884 util.go:30] "No sandbox for 
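The Trace[1651125820] lines above come from Kubernetes' trace utility, which records a start time and emits the trace only when the total exceeds a threshold; here a walk of the galera PVC to compute volume metrics took just over a second. A stdlib-only sketch of the same idea (the helper is mine, not k8s.io/utils/trace):

    package main

    import (
    	"fmt"
    	"time"
    )

    // traced runs op and prints a trace only if it took at least threshold,
    // mirroring the two Trace[...] lines in the log.
    func traced(name string, threshold time.Duration, op func()) {
    	start := time.Now()
    	op()
    	if total := time.Since(start); total >= threshold {
    		fmt.Printf("Trace[%s]: (total time: %v)\n", name, total.Round(time.Millisecond))
    		fmt.Printf("Trace[%s]: [%v] [%v] END\n", name, total, total)
    	}
    }

    func main() {
    	traced("Calculate volume metrics of mysql-db", time.Second, func() {
    		time.Sleep(1005 * time.Millisecond) // stand-in for walking the volume
    	})
    }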
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.321750 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.340778 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktmzw"]
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.443329 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-catalog-content\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.443388 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-utilities\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.443615 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj4xv\" (UniqueName: \"kubernetes.io/projected/a48c6d2c-ee54-47e6-9b23-312c0465f348-kube-api-access-pj4xv\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.546349 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-catalog-content\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.546407 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-utilities\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.546472 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj4xv\" (UniqueName: \"kubernetes.io/projected/a48c6d2c-ee54-47e6-9b23-312c0465f348-kube-api-access-pj4xv\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.546925 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-catalog-content\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.547045 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-utilities\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.565327 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj4xv\" (UniqueName: \"kubernetes.io/projected/a48c6d2c-ee54-47e6-9b23-312c0465f348-kube-api-access-pj4xv\") pod \"redhat-marketplace-ktmzw\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") " pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:55 crc kubenswrapper[4884]: I1128 17:56:55.649619 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:56:56 crc kubenswrapper[4884]: I1128 17:56:56.160445 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktmzw"]
Nov 28 17:56:56 crc kubenswrapper[4884]: I1128 17:56:56.232808 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktmzw" event={"ID":"a48c6d2c-ee54-47e6-9b23-312c0465f348","Type":"ContainerStarted","Data":"cb8a8fddf219f7037f3074daea1557605dcaf395b96e55f008ea4402d7e97429"}
Nov 28 17:56:57 crc kubenswrapper[4884]: I1128 17:56:57.244314 4884 generic.go:334] "Generic (PLEG): container finished" podID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerID="a8bc4f41207c749a7a0a15057c78350f413e2bea0337d867e804c18decc8fd98" exitCode=0
Nov 28 17:56:57 crc kubenswrapper[4884]: I1128 17:56:57.244390 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktmzw" event={"ID":"a48c6d2c-ee54-47e6-9b23-312c0465f348","Type":"ContainerDied","Data":"a8bc4f41207c749a7a0a15057c78350f413e2bea0337d867e804c18decc8fd98"}
Nov 28 17:56:59 crc kubenswrapper[4884]: I1128 17:56:59.265232 4884 generic.go:334] "Generic (PLEG): container finished" podID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerID="6805a6d92086d007206719df975d432d06bbe02135854483346a255e08924e7d" exitCode=0
Nov 28 17:56:59 crc kubenswrapper[4884]: I1128 17:56:59.265365 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktmzw" event={"ID":"a48c6d2c-ee54-47e6-9b23-312c0465f348","Type":"ContainerDied","Data":"6805a6d92086d007206719df975d432d06bbe02135854483346a255e08924e7d"}
Nov 28 17:57:00 crc kubenswrapper[4884]: I1128 17:57:00.279785 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktmzw" event={"ID":"a48c6d2c-ee54-47e6-9b23-312c0465f348","Type":"ContainerStarted","Data":"63573b6588298df5b483bedd8107aa5871236a9e1bfe94f31d6fd5811319110a"}
Nov 28 17:57:00 crc kubenswrapper[4884]: I1128 17:57:00.328932 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ktmzw" podStartSLOduration=2.869673864 podStartE2EDuration="5.328914472s" podCreationTimestamp="2025-11-28 17:56:55 +0000 UTC" firstStartedPulling="2025-11-28 17:56:57.247563716 +0000 UTC m=+9456.810347537" lastFinishedPulling="2025-11-28 17:56:59.706804334 +0000 UTC m=+9459.269588145" observedRunningTime="2025-11-28 17:57:00.30941263 +0000 UTC m=+9459.872196481" watchObservedRunningTime="2025-11-28 17:57:00.328914472 +0000 UTC m=+9459.891698273"
Nov 28 17:57:05 crc kubenswrapper[4884]: I1128 17:57:05.650048 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:57:05 crc kubenswrapper[4884]: I1128 17:57:05.650605 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:57:05 crc kubenswrapper[4884]: I1128 17:57:05.694799 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:57:06 crc kubenswrapper[4884]: I1128 17:57:06.692063 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb"
Nov 28 17:57:06 crc kubenswrapper[4884]: E1128 17:57:06.692442 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 17:57:06 crc kubenswrapper[4884]: I1128 17:57:06.991344 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:57:07 crc kubenswrapper[4884]: I1128 17:57:07.047436 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktmzw"]
Nov 28 17:57:08 crc kubenswrapper[4884]: I1128 17:57:08.354784 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ktmzw" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="registry-server" containerID="cri-o://63573b6588298df5b483bedd8107aa5871236a9e1bfe94f31d6fd5811319110a" gracePeriod=2
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.366011 4884 generic.go:334] "Generic (PLEG): container finished" podID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerID="63573b6588298df5b483bedd8107aa5871236a9e1bfe94f31d6fd5811319110a" exitCode=0
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.366062 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktmzw" event={"ID":"a48c6d2c-ee54-47e6-9b23-312c0465f348","Type":"ContainerDied","Data":"63573b6588298df5b483bedd8107aa5871236a9e1bfe94f31d6fd5811319110a"}
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.834474 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.871924 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-utilities\") pod \"a48c6d2c-ee54-47e6-9b23-312c0465f348\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") "
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.872225 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-catalog-content\") pod \"a48c6d2c-ee54-47e6-9b23-312c0465f348\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") "
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.872263 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj4xv\" (UniqueName: \"kubernetes.io/projected/a48c6d2c-ee54-47e6-9b23-312c0465f348-kube-api-access-pj4xv\") pod \"a48c6d2c-ee54-47e6-9b23-312c0465f348\" (UID: \"a48c6d2c-ee54-47e6-9b23-312c0465f348\") "
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.875307 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-utilities" (OuterVolumeSpecName: "utilities") pod "a48c6d2c-ee54-47e6-9b23-312c0465f348" (UID: "a48c6d2c-ee54-47e6-9b23-312c0465f348"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.882391 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a48c6d2c-ee54-47e6-9b23-312c0465f348-kube-api-access-pj4xv" (OuterVolumeSpecName: "kube-api-access-pj4xv") pod "a48c6d2c-ee54-47e6-9b23-312c0465f348" (UID: "a48c6d2c-ee54-47e6-9b23-312c0465f348"). InnerVolumeSpecName "kube-api-access-pj4xv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.895977 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a48c6d2c-ee54-47e6-9b23-312c0465f348" (UID: "a48c6d2c-ee54-47e6-9b23-312c0465f348"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.974581 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.975114 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj4xv\" (UniqueName: \"kubernetes.io/projected/a48c6d2c-ee54-47e6-9b23-312c0465f348-kube-api-access-pj4xv\") on node \"crc\" DevicePath \"\""
Nov 28 17:57:09 crc kubenswrapper[4884]: I1128 17:57:09.975187 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48c6d2c-ee54-47e6-9b23-312c0465f348-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.380339 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ktmzw" event={"ID":"a48c6d2c-ee54-47e6-9b23-312c0465f348","Type":"ContainerDied","Data":"cb8a8fddf219f7037f3074daea1557605dcaf395b96e55f008ea4402d7e97429"}
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.380396 4884 scope.go:117] "RemoveContainer" containerID="63573b6588298df5b483bedd8107aa5871236a9e1bfe94f31d6fd5811319110a"
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.380568 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ktmzw"
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.413177 4884 scope.go:117] "RemoveContainer" containerID="6805a6d92086d007206719df975d432d06bbe02135854483346a255e08924e7d"
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.423623 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktmzw"]
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.436953 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ktmzw"]
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.449931 4884 scope.go:117] "RemoveContainer" containerID="a8bc4f41207c749a7a0a15057c78350f413e2bea0337d867e804c18decc8fd98"
Nov 28 17:57:10 crc kubenswrapper[4884]: I1128 17:57:10.701827 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" path="/var/lib/kubelet/pods/a48c6d2c-ee54-47e6-9b23-312c0465f348/volumes"
Nov 28 17:57:19 crc kubenswrapper[4884]: I1128 17:57:19.688061 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb"
Nov 28 17:57:19 crc kubenswrapper[4884]: E1128 17:57:19.688949 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 17:57:33 crc kubenswrapper[4884]: I1128 17:57:33.688912 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb"
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:57:47 crc kubenswrapper[4884]: I1128 17:57:47.689550 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:57:47 crc kubenswrapper[4884]: E1128 17:57:47.690771 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:57:59 crc kubenswrapper[4884]: I1128 17:57:59.688353 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:57:59 crc kubenswrapper[4884]: E1128 17:57:59.689180 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.274606 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5vxdv"] Nov 28 17:58:01 crc kubenswrapper[4884]: E1128 17:58:01.276162 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="extract-utilities" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.276273 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="extract-utilities" Nov 28 17:58:01 crc kubenswrapper[4884]: E1128 17:58:01.276350 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="extract-content" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.276412 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="extract-content" Nov 28 17:58:01 crc kubenswrapper[4884]: E1128 17:58:01.276498 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="registry-server" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.276559 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="registry-server" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.276906 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a48c6d2c-ee54-47e6-9b23-312c0465f348" containerName="registry-server" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.278991 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.292645 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5vxdv"] Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.415777 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-catalog-content\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.415920 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-utilities\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.415970 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gkb5\" (UniqueName: \"kubernetes.io/projected/9a72f356-e59d-415b-b40d-bd171ae5ca8a-kube-api-access-8gkb5\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.518574 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-utilities\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.519206 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-utilities\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.519379 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gkb5\" (UniqueName: \"kubernetes.io/projected/9a72f356-e59d-415b-b40d-bd171ae5ca8a-kube-api-access-8gkb5\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.519831 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-catalog-content\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.520114 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-catalog-content\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.545917 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8gkb5\" (UniqueName: \"kubernetes.io/projected/9a72f356-e59d-415b-b40d-bd171ae5ca8a-kube-api-access-8gkb5\") pod \"redhat-operators-5vxdv\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:01 crc kubenswrapper[4884]: I1128 17:58:01.608647 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:02 crc kubenswrapper[4884]: I1128 17:58:02.096010 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5vxdv"] Nov 28 17:58:02 crc kubenswrapper[4884]: I1128 17:58:02.948518 4884 generic.go:334] "Generic (PLEG): container finished" podID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerID="c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3" exitCode=0 Nov 28 17:58:02 crc kubenswrapper[4884]: I1128 17:58:02.948616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerDied","Data":"c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3"} Nov 28 17:58:02 crc kubenswrapper[4884]: I1128 17:58:02.948985 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerStarted","Data":"7f86f3ad9cae2193875e792286a8171c07a8835078fd1c09a6ac135cd4d73d84"} Nov 28 17:58:04 crc kubenswrapper[4884]: I1128 17:58:04.985383 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerStarted","Data":"81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed"} Nov 28 17:58:07 crc kubenswrapper[4884]: I1128 17:58:07.008864 4884 generic.go:334] "Generic (PLEG): container finished" podID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerID="81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed" exitCode=0 Nov 28 17:58:07 crc kubenswrapper[4884]: I1128 17:58:07.008932 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerDied","Data":"81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed"} Nov 28 17:58:09 crc kubenswrapper[4884]: I1128 17:58:09.035299 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerStarted","Data":"092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb"} Nov 28 17:58:09 crc kubenswrapper[4884]: I1128 17:58:09.063154 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5vxdv" podStartSLOduration=3.095265768 podStartE2EDuration="8.063123305s" podCreationTimestamp="2025-11-28 17:58:01 +0000 UTC" firstStartedPulling="2025-11-28 17:58:02.950917872 +0000 UTC m=+9522.513701683" lastFinishedPulling="2025-11-28 17:58:07.918775419 +0000 UTC m=+9527.481559220" observedRunningTime="2025-11-28 17:58:09.054640196 +0000 UTC m=+9528.617424037" watchObservedRunningTime="2025-11-28 17:58:09.063123305 +0000 UTC m=+9528.625907116" Nov 28 17:58:11 crc kubenswrapper[4884]: I1128 17:58:11.609453 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5vxdv" 
Nov 28 17:58:11 crc kubenswrapper[4884]: I1128 17:58:11.609870 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:12 crc kubenswrapper[4884]: I1128 17:58:12.651676 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5vxdv" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="registry-server" probeResult="failure" output=< Nov 28 17:58:12 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 17:58:12 crc kubenswrapper[4884]: > Nov 28 17:58:14 crc kubenswrapper[4884]: I1128 17:58:14.689290 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:58:14 crc kubenswrapper[4884]: E1128 17:58:14.689989 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:58:21 crc kubenswrapper[4884]: I1128 17:58:21.806188 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:21 crc kubenswrapper[4884]: I1128 17:58:21.870423 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:22 crc kubenswrapper[4884]: I1128 17:58:22.059715 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5vxdv"] Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.199176 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5vxdv" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="registry-server" containerID="cri-o://092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb" gracePeriod=2 Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.739413 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.871992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-utilities\") pod \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.872186 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gkb5\" (UniqueName: \"kubernetes.io/projected/9a72f356-e59d-415b-b40d-bd171ae5ca8a-kube-api-access-8gkb5\") pod \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.872271 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-catalog-content\") pod \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\" (UID: \"9a72f356-e59d-415b-b40d-bd171ae5ca8a\") " Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.873704 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-utilities" (OuterVolumeSpecName: "utilities") pod "9a72f356-e59d-415b-b40d-bd171ae5ca8a" (UID: "9a72f356-e59d-415b-b40d-bd171ae5ca8a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.878148 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a72f356-e59d-415b-b40d-bd171ae5ca8a-kube-api-access-8gkb5" (OuterVolumeSpecName: "kube-api-access-8gkb5") pod "9a72f356-e59d-415b-b40d-bd171ae5ca8a" (UID: "9a72f356-e59d-415b-b40d-bd171ae5ca8a"). InnerVolumeSpecName "kube-api-access-8gkb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.974561 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.974601 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gkb5\" (UniqueName: \"kubernetes.io/projected/9a72f356-e59d-415b-b40d-bd171ae5ca8a-kube-api-access-8gkb5\") on node \"crc\" DevicePath \"\"" Nov 28 17:58:23 crc kubenswrapper[4884]: I1128 17:58:23.987420 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a72f356-e59d-415b-b40d-bd171ae5ca8a" (UID: "9a72f356-e59d-415b-b40d-bd171ae5ca8a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.076165 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a72f356-e59d-415b-b40d-bd171ae5ca8a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.213030 4884 generic.go:334] "Generic (PLEG): container finished" podID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerID="092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb" exitCode=0 Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.213083 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerDied","Data":"092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb"} Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.213184 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vxdv" event={"ID":"9a72f356-e59d-415b-b40d-bd171ae5ca8a","Type":"ContainerDied","Data":"7f86f3ad9cae2193875e792286a8171c07a8835078fd1c09a6ac135cd4d73d84"} Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.213185 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vxdv" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.213215 4884 scope.go:117] "RemoveContainer" containerID="092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.236079 4884 scope.go:117] "RemoveContainer" containerID="81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.277502 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5vxdv"] Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.281912 4884 scope.go:117] "RemoveContainer" containerID="c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.287213 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5vxdv"] Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.332788 4884 scope.go:117] "RemoveContainer" containerID="092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb" Nov 28 17:58:24 crc kubenswrapper[4884]: E1128 17:58:24.333396 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb\": container with ID starting with 092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb not found: ID does not exist" containerID="092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.333451 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb"} err="failed to get container status \"092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb\": rpc error: code = NotFound desc = could not find container \"092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb\": container with ID starting with 092747d2a6203195f1612c2130618e543240c131ffc1d3ebf28d5836c5302bdb not found: ID does not exist" Nov 28 17:58:24 crc 
kubenswrapper[4884]: I1128 17:58:24.333486 4884 scope.go:117] "RemoveContainer" containerID="81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed" Nov 28 17:58:24 crc kubenswrapper[4884]: E1128 17:58:24.333973 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed\": container with ID starting with 81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed not found: ID does not exist" containerID="81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.334023 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed"} err="failed to get container status \"81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed\": rpc error: code = NotFound desc = could not find container \"81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed\": container with ID starting with 81e195fc62f77b9420baa3282e8e5ed7ea913b03d0ea40d3cd35bae92c59e0ed not found: ID does not exist" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.334069 4884 scope.go:117] "RemoveContainer" containerID="c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3" Nov 28 17:58:24 crc kubenswrapper[4884]: E1128 17:58:24.334484 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3\": container with ID starting with c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3 not found: ID does not exist" containerID="c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.334519 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3"} err="failed to get container status \"c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3\": rpc error: code = NotFound desc = could not find container \"c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3\": container with ID starting with c59e3e442ce9475541b9be69c989371c8721a6f013508aaef52bda5703c078d3 not found: ID does not exist" Nov 28 17:58:24 crc kubenswrapper[4884]: I1128 17:58:24.706891 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" path="/var/lib/kubelet/pods/9a72f356-e59d-415b-b40d-bd171ae5ca8a/volumes" Nov 28 17:58:29 crc kubenswrapper[4884]: I1128 17:58:29.688411 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:58:29 crc kubenswrapper[4884]: E1128 17:58:29.689809 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:58:41 crc kubenswrapper[4884]: I1128 17:58:41.689199 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" 
Nov 28 17:58:41 crc kubenswrapper[4884]: E1128 17:58:41.690437 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 17:58:55 crc kubenswrapper[4884]: I1128 17:58:55.688877 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 17:58:56 crc kubenswrapper[4884]: I1128 17:58:56.606304 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"14b54bea8aef2a4d2be9cd9e83bb2d4c9633b04861db8c123f1ef4ed5ca7956e"} Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.169654 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h"] Nov 28 18:00:00 crc kubenswrapper[4884]: E1128 18:00:00.170885 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="extract-utilities" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.170930 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="extract-utilities" Nov 28 18:00:00 crc kubenswrapper[4884]: E1128 18:00:00.170961 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="registry-server" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.170971 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="registry-server" Nov 28 18:00:00 crc kubenswrapper[4884]: E1128 18:00:00.171011 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="extract-content" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.171021 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="extract-content" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.171435 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a72f356-e59d-415b-b40d-bd171ae5ca8a" containerName="registry-server" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.172783 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.175585 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.175590 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.183287 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h"] Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.269520 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74430eea-98d0-4e29-b6af-4a5516345095-config-volume\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.269852 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrk6p\" (UniqueName: \"kubernetes.io/projected/74430eea-98d0-4e29-b6af-4a5516345095-kube-api-access-rrk6p\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.269969 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74430eea-98d0-4e29-b6af-4a5516345095-secret-volume\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.372208 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74430eea-98d0-4e29-b6af-4a5516345095-config-volume\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.372262 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrk6p\" (UniqueName: \"kubernetes.io/projected/74430eea-98d0-4e29-b6af-4a5516345095-kube-api-access-rrk6p\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.372324 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74430eea-98d0-4e29-b6af-4a5516345095-secret-volume\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.373477 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74430eea-98d0-4e29-b6af-4a5516345095-config-volume\") pod 
\"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.386264 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74430eea-98d0-4e29-b6af-4a5516345095-secret-volume\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.396200 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrk6p\" (UniqueName: \"kubernetes.io/projected/74430eea-98d0-4e29-b6af-4a5516345095-kube-api-access-rrk6p\") pod \"collect-profiles-29405880-dn82h\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:00 crc kubenswrapper[4884]: I1128 18:00:00.498595 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:01 crc kubenswrapper[4884]: I1128 18:00:01.009185 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h"] Nov 28 18:00:01 crc kubenswrapper[4884]: I1128 18:00:01.307649 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" event={"ID":"74430eea-98d0-4e29-b6af-4a5516345095","Type":"ContainerStarted","Data":"12ae99bfa7a64a5bdfcee4e80bb8208632f27620955d2abe7b1c04a7c4336eca"} Nov 28 18:00:01 crc kubenswrapper[4884]: I1128 18:00:01.307966 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" event={"ID":"74430eea-98d0-4e29-b6af-4a5516345095","Type":"ContainerStarted","Data":"97ba9f91db381b9c02fb051918cf90038e8af700b608f1c17e92aca9bd4ade74"} Nov 28 18:00:01 crc kubenswrapper[4884]: I1128 18:00:01.327988 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" podStartSLOduration=1.3279689700000001 podStartE2EDuration="1.32796897s" podCreationTimestamp="2025-11-28 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:00:01.325055477 +0000 UTC m=+9640.887839278" watchObservedRunningTime="2025-11-28 18:00:01.32796897 +0000 UTC m=+9640.890752771" Nov 28 18:00:02 crc kubenswrapper[4884]: I1128 18:00:02.333586 4884 generic.go:334] "Generic (PLEG): container finished" podID="74430eea-98d0-4e29-b6af-4a5516345095" containerID="12ae99bfa7a64a5bdfcee4e80bb8208632f27620955d2abe7b1c04a7c4336eca" exitCode=0 Nov 28 18:00:02 crc kubenswrapper[4884]: I1128 18:00:02.333926 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" event={"ID":"74430eea-98d0-4e29-b6af-4a5516345095","Type":"ContainerDied","Data":"12ae99bfa7a64a5bdfcee4e80bb8208632f27620955d2abe7b1c04a7c4336eca"} Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.719224 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.839841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74430eea-98d0-4e29-b6af-4a5516345095-secret-volume\") pod \"74430eea-98d0-4e29-b6af-4a5516345095\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.840282 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrk6p\" (UniqueName: \"kubernetes.io/projected/74430eea-98d0-4e29-b6af-4a5516345095-kube-api-access-rrk6p\") pod \"74430eea-98d0-4e29-b6af-4a5516345095\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.840608 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74430eea-98d0-4e29-b6af-4a5516345095-config-volume\") pod \"74430eea-98d0-4e29-b6af-4a5516345095\" (UID: \"74430eea-98d0-4e29-b6af-4a5516345095\") " Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.841559 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74430eea-98d0-4e29-b6af-4a5516345095-config-volume" (OuterVolumeSpecName: "config-volume") pod "74430eea-98d0-4e29-b6af-4a5516345095" (UID: "74430eea-98d0-4e29-b6af-4a5516345095"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.846657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74430eea-98d0-4e29-b6af-4a5516345095-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "74430eea-98d0-4e29-b6af-4a5516345095" (UID: "74430eea-98d0-4e29-b6af-4a5516345095"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.846801 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74430eea-98d0-4e29-b6af-4a5516345095-kube-api-access-rrk6p" (OuterVolumeSpecName: "kube-api-access-rrk6p") pod "74430eea-98d0-4e29-b6af-4a5516345095" (UID: "74430eea-98d0-4e29-b6af-4a5516345095"). InnerVolumeSpecName "kube-api-access-rrk6p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.943608 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74430eea-98d0-4e29-b6af-4a5516345095-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.943849 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74430eea-98d0-4e29-b6af-4a5516345095-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:00:03 crc kubenswrapper[4884]: I1128 18:00:03.943909 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrk6p\" (UniqueName: \"kubernetes.io/projected/74430eea-98d0-4e29-b6af-4a5516345095-kube-api-access-rrk6p\") on node \"crc\" DevicePath \"\"" Nov 28 18:00:04 crc kubenswrapper[4884]: I1128 18:00:04.360858 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" event={"ID":"74430eea-98d0-4e29-b6af-4a5516345095","Type":"ContainerDied","Data":"97ba9f91db381b9c02fb051918cf90038e8af700b608f1c17e92aca9bd4ade74"} Nov 28 18:00:04 crc kubenswrapper[4884]: I1128 18:00:04.361290 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97ba9f91db381b9c02fb051918cf90038e8af700b608f1c17e92aca9bd4ade74" Nov 28 18:00:04 crc kubenswrapper[4884]: I1128 18:00:04.360982 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405880-dn82h" Nov 28 18:00:04 crc kubenswrapper[4884]: I1128 18:00:04.419304 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr"] Nov 28 18:00:04 crc kubenswrapper[4884]: I1128 18:00:04.444017 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405835-g69nr"] Nov 28 18:00:04 crc kubenswrapper[4884]: I1128 18:00:04.706252 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5" path="/var/lib/kubelet/pods/dfe65d0d-0a3c-4bbf-83da-8c897b6f27e5/volumes" Nov 28 18:00:38 crc kubenswrapper[4884]: I1128 18:00:38.640631 4884 scope.go:117] "RemoveContainer" containerID="eb0a3b52512cbc0ed63b1caeb2d40764642de80f388afafb460443f5bfcc822f" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.149747 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405881-g8k7b"] Nov 28 18:01:00 crc kubenswrapper[4884]: E1128 18:01:00.150745 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74430eea-98d0-4e29-b6af-4a5516345095" containerName="collect-profiles" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.150758 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="74430eea-98d0-4e29-b6af-4a5516345095" containerName="collect-profiles" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.151006 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="74430eea-98d0-4e29-b6af-4a5516345095" containerName="collect-profiles" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.151978 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.167385 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405881-g8k7b"] Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.271702 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-fernet-keys\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.271829 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-config-data\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.271871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2z45v\" (UniqueName: \"kubernetes.io/projected/6760a8ef-6d8d-4afa-b76f-64b152fec02f-kube-api-access-2z45v\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.271913 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-combined-ca-bundle\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.374158 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-config-data\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.374225 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2z45v\" (UniqueName: \"kubernetes.io/projected/6760a8ef-6d8d-4afa-b76f-64b152fec02f-kube-api-access-2z45v\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.374270 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-combined-ca-bundle\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.374371 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-fernet-keys\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.449654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-2z45v\" (UniqueName: \"kubernetes.io/projected/6760a8ef-6d8d-4afa-b76f-64b152fec02f-kube-api-access-2z45v\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.449937 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-fernet-keys\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.449899 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-config-data\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.449841 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-combined-ca-bundle\") pod \"keystone-cron-29405881-g8k7b\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.491516 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.931683 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405881-g8k7b"] Nov 28 18:01:00 crc kubenswrapper[4884]: W1128 18:01:00.932437 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6760a8ef_6d8d_4afa_b76f_64b152fec02f.slice/crio-a41a869dd95c00a43ba6ad96f8cb91e1e7ed15ce76358dcd3b572a0e1303974d WatchSource:0}: Error finding container a41a869dd95c00a43ba6ad96f8cb91e1e7ed15ce76358dcd3b572a0e1303974d: Status 404 returned error can't find the container with id a41a869dd95c00a43ba6ad96f8cb91e1e7ed15ce76358dcd3b572a0e1303974d Nov 28 18:01:00 crc kubenswrapper[4884]: I1128 18:01:00.998903 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-g8k7b" event={"ID":"6760a8ef-6d8d-4afa-b76f-64b152fec02f","Type":"ContainerStarted","Data":"a41a869dd95c00a43ba6ad96f8cb91e1e7ed15ce76358dcd3b572a0e1303974d"} Nov 28 18:01:02 crc kubenswrapper[4884]: I1128 18:01:02.010539 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-g8k7b" event={"ID":"6760a8ef-6d8d-4afa-b76f-64b152fec02f","Type":"ContainerStarted","Data":"3a33e618fbb4fdf23099a183c10dc8dc55b3d6841dd40746f0ee136eaf3da683"} Nov 28 18:01:02 crc kubenswrapper[4884]: I1128 18:01:02.031675 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405881-g8k7b" podStartSLOduration=2.031659841 podStartE2EDuration="2.031659841s" podCreationTimestamp="2025-11-28 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:01:02.029969979 +0000 UTC m=+9701.592753780" watchObservedRunningTime="2025-11-28 18:01:02.031659841 +0000 UTC m=+9701.594443642" Nov 28 18:01:05 crc kubenswrapper[4884]: I1128 18:01:05.045764 4884 
generic.go:334] "Generic (PLEG): container finished" podID="6760a8ef-6d8d-4afa-b76f-64b152fec02f" containerID="3a33e618fbb4fdf23099a183c10dc8dc55b3d6841dd40746f0ee136eaf3da683" exitCode=0 Nov 28 18:01:05 crc kubenswrapper[4884]: I1128 18:01:05.045857 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-g8k7b" event={"ID":"6760a8ef-6d8d-4afa-b76f-64b152fec02f","Type":"ContainerDied","Data":"3a33e618fbb4fdf23099a183c10dc8dc55b3d6841dd40746f0ee136eaf3da683"} Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.466282 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.525336 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-config-data\") pod \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.525626 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-combined-ca-bundle\") pod \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.525653 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2z45v\" (UniqueName: \"kubernetes.io/projected/6760a8ef-6d8d-4afa-b76f-64b152fec02f-kube-api-access-2z45v\") pod \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.525694 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-fernet-keys\") pod \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\" (UID: \"6760a8ef-6d8d-4afa-b76f-64b152fec02f\") " Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.532192 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6760a8ef-6d8d-4afa-b76f-64b152fec02f" (UID: "6760a8ef-6d8d-4afa-b76f-64b152fec02f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.535553 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6760a8ef-6d8d-4afa-b76f-64b152fec02f-kube-api-access-2z45v" (OuterVolumeSpecName: "kube-api-access-2z45v") pod "6760a8ef-6d8d-4afa-b76f-64b152fec02f" (UID: "6760a8ef-6d8d-4afa-b76f-64b152fec02f"). InnerVolumeSpecName "kube-api-access-2z45v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.576715 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6760a8ef-6d8d-4afa-b76f-64b152fec02f" (UID: "6760a8ef-6d8d-4afa-b76f-64b152fec02f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.578948 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-config-data" (OuterVolumeSpecName: "config-data") pod "6760a8ef-6d8d-4afa-b76f-64b152fec02f" (UID: "6760a8ef-6d8d-4afa-b76f-64b152fec02f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.628464 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.628496 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.628507 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2z45v\" (UniqueName: \"kubernetes.io/projected/6760a8ef-6d8d-4afa-b76f-64b152fec02f-kube-api-access-2z45v\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:06 crc kubenswrapper[4884]: I1128 18:01:06.628516 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6760a8ef-6d8d-4afa-b76f-64b152fec02f-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:07 crc kubenswrapper[4884]: I1128 18:01:07.086171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405881-g8k7b" event={"ID":"6760a8ef-6d8d-4afa-b76f-64b152fec02f","Type":"ContainerDied","Data":"a41a869dd95c00a43ba6ad96f8cb91e1e7ed15ce76358dcd3b572a0e1303974d"} Nov 28 18:01:07 crc kubenswrapper[4884]: I1128 18:01:07.086523 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a41a869dd95c00a43ba6ad96f8cb91e1e7ed15ce76358dcd3b572a0e1303974d" Nov 28 18:01:07 crc kubenswrapper[4884]: I1128 18:01:07.086219 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405881-g8k7b" Nov 28 18:01:21 crc kubenswrapper[4884]: I1128 18:01:21.243548 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:01:21 crc kubenswrapper[4884]: I1128 18:01:21.244499 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:01:33 crc kubenswrapper[4884]: I1128 18:01:33.943344 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hfkkk"] Nov 28 18:01:33 crc kubenswrapper[4884]: E1128 18:01:33.944632 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6760a8ef-6d8d-4afa-b76f-64b152fec02f" containerName="keystone-cron" Nov 28 18:01:33 crc kubenswrapper[4884]: I1128 18:01:33.944660 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6760a8ef-6d8d-4afa-b76f-64b152fec02f" containerName="keystone-cron" Nov 28 18:01:33 crc kubenswrapper[4884]: I1128 18:01:33.945058 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6760a8ef-6d8d-4afa-b76f-64b152fec02f" containerName="keystone-cron" Nov 28 18:01:33 crc kubenswrapper[4884]: I1128 18:01:33.947720 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:33 crc kubenswrapper[4884]: I1128 18:01:33.962959 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hfkkk"] Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.106180 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwk76\" (UniqueName: \"kubernetes.io/projected/ddc71974-f417-4e2e-b187-9e8e9720e3f5-kube-api-access-qwk76\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.106559 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-catalog-content\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.106875 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-utilities\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.209124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwk76\" (UniqueName: \"kubernetes.io/projected/ddc71974-f417-4e2e-b187-9e8e9720e3f5-kube-api-access-qwk76\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " 
pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.209229 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-catalog-content\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.209353 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-utilities\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.210001 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-utilities\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.210021 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-catalog-content\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.232975 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwk76\" (UniqueName: \"kubernetes.io/projected/ddc71974-f417-4e2e-b187-9e8e9720e3f5-kube-api-access-qwk76\") pod \"certified-operators-hfkkk\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.283413 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:34 crc kubenswrapper[4884]: I1128 18:01:34.793949 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hfkkk"] Nov 28 18:01:35 crc kubenswrapper[4884]: I1128 18:01:35.405593 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerStarted","Data":"49545605061d51bc99ffd18905cd3afa2c67338e3cdf84fd316f732645feda99"} Nov 28 18:01:36 crc kubenswrapper[4884]: I1128 18:01:36.425707 4884 generic.go:334] "Generic (PLEG): container finished" podID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerID="70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e" exitCode=0 Nov 28 18:01:36 crc kubenswrapper[4884]: I1128 18:01:36.425812 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerDied","Data":"70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e"} Nov 28 18:01:36 crc kubenswrapper[4884]: I1128 18:01:36.431161 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:01:38 crc kubenswrapper[4884]: I1128 18:01:38.448721 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerStarted","Data":"43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f"} Nov 28 18:01:39 crc kubenswrapper[4884]: I1128 18:01:39.462015 4884 generic.go:334] "Generic (PLEG): container finished" podID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerID="43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f" exitCode=0 Nov 28 18:01:39 crc kubenswrapper[4884]: I1128 18:01:39.462067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerDied","Data":"43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f"} Nov 28 18:01:40 crc kubenswrapper[4884]: I1128 18:01:40.473352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerStarted","Data":"d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6"} Nov 28 18:01:40 crc kubenswrapper[4884]: I1128 18:01:40.497699 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hfkkk" podStartSLOduration=3.848196922 podStartE2EDuration="7.497678672s" podCreationTimestamp="2025-11-28 18:01:33 +0000 UTC" firstStartedPulling="2025-11-28 18:01:36.430536439 +0000 UTC m=+9735.993320270" lastFinishedPulling="2025-11-28 18:01:40.080018179 +0000 UTC m=+9739.642802020" observedRunningTime="2025-11-28 18:01:40.495638862 +0000 UTC m=+9740.058422683" watchObservedRunningTime="2025-11-28 18:01:40.497678672 +0000 UTC m=+9740.060462483" Nov 28 18:01:44 crc kubenswrapper[4884]: I1128 18:01:44.284363 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:44 crc kubenswrapper[4884]: I1128 18:01:44.285120 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:44 crc kubenswrapper[4884]: I1128 18:01:44.337355 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:51 crc kubenswrapper[4884]: I1128 18:01:51.247660 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:01:51 crc kubenswrapper[4884]: I1128 18:01:51.248417 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:01:54 crc kubenswrapper[4884]: I1128 18:01:54.383959 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:54 crc kubenswrapper[4884]: I1128 18:01:54.454932 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hfkkk"] Nov 28 18:01:54 crc kubenswrapper[4884]: I1128 18:01:54.651452 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hfkkk" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="registry-server" containerID="cri-o://d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6" gracePeriod=2 Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.513341 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.610575 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwk76\" (UniqueName: \"kubernetes.io/projected/ddc71974-f417-4e2e-b187-9e8e9720e3f5-kube-api-access-qwk76\") pod \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.610630 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-utilities\") pod \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.610992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-catalog-content\") pod \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\" (UID: \"ddc71974-f417-4e2e-b187-9e8e9720e3f5\") " Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.612166 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-utilities" (OuterVolumeSpecName: "utilities") pod "ddc71974-f417-4e2e-b187-9e8e9720e3f5" (UID: "ddc71974-f417-4e2e-b187-9e8e9720e3f5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.621553 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddc71974-f417-4e2e-b187-9e8e9720e3f5-kube-api-access-qwk76" (OuterVolumeSpecName: "kube-api-access-qwk76") pod "ddc71974-f417-4e2e-b187-9e8e9720e3f5" (UID: "ddc71974-f417-4e2e-b187-9e8e9720e3f5"). InnerVolumeSpecName "kube-api-access-qwk76". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.669163 4884 generic.go:334] "Generic (PLEG): container finished" podID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerID="d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6" exitCode=0 Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.669235 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerDied","Data":"d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6"} Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.669264 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfkkk" event={"ID":"ddc71974-f417-4e2e-b187-9e8e9720e3f5","Type":"ContainerDied","Data":"49545605061d51bc99ffd18905cd3afa2c67338e3cdf84fd316f732645feda99"} Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.669283 4884 scope.go:117] "RemoveContainer" containerID="d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.669414 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfkkk" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.670906 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ddc71974-f417-4e2e-b187-9e8e9720e3f5" (UID: "ddc71974-f417-4e2e-b187-9e8e9720e3f5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.709221 4884 scope.go:117] "RemoveContainer" containerID="43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.713547 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.713583 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwk76\" (UniqueName: \"kubernetes.io/projected/ddc71974-f417-4e2e-b187-9e8e9720e3f5-kube-api-access-qwk76\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.713600 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddc71974-f417-4e2e-b187-9e8e9720e3f5-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.730962 4884 scope.go:117] "RemoveContainer" containerID="70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.783399 4884 scope.go:117] "RemoveContainer" containerID="d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6" Nov 28 18:01:55 crc kubenswrapper[4884]: E1128 18:01:55.783879 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6\": container with ID starting with d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6 not found: ID does not exist" containerID="d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.783952 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6"} err="failed to get container status \"d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6\": rpc error: code = NotFound desc = could not find container \"d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6\": container with ID starting with d75a113f2751624070ff0ccaccd932cb26ba79210b6e96d5ee76d7cdc6356bf6 not found: ID does not exist" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.783992 4884 scope.go:117] "RemoveContainer" containerID="43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f" Nov 28 18:01:55 crc kubenswrapper[4884]: E1128 18:01:55.784753 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f\": container with ID starting with 43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f not found: ID does not exist" containerID="43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.784793 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f"} err="failed to get container status \"43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f\": rpc error: code = NotFound desc = could not find container 
\"43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f\": container with ID starting with 43bb6b9ff0e276bf253b3c1eea0771a5cb3f5aae8ee6a2f35f14291cd76b033f not found: ID does not exist" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.784816 4884 scope.go:117] "RemoveContainer" containerID="70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e" Nov 28 18:01:55 crc kubenswrapper[4884]: E1128 18:01:55.785307 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e\": container with ID starting with 70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e not found: ID does not exist" containerID="70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e" Nov 28 18:01:55 crc kubenswrapper[4884]: I1128 18:01:55.785391 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e"} err="failed to get container status \"70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e\": rpc error: code = NotFound desc = could not find container \"70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e\": container with ID starting with 70056acf1366ae223a5ce03df78d64f14716de2cd034d472728902d5ed7dd24e not found: ID does not exist" Nov 28 18:01:56 crc kubenswrapper[4884]: I1128 18:01:56.023340 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hfkkk"] Nov 28 18:01:56 crc kubenswrapper[4884]: I1128 18:01:56.042846 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hfkkk"] Nov 28 18:01:56 crc kubenswrapper[4884]: I1128 18:01:56.703837 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" path="/var/lib/kubelet/pods/ddc71974-f417-4e2e-b187-9e8e9720e3f5/volumes" Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.243739 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.244387 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.244448 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.245455 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"14b54bea8aef2a4d2be9cd9e83bb2d4c9633b04861db8c123f1ef4ed5ca7956e"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.245523 4884 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://14b54bea8aef2a4d2be9cd9e83bb2d4c9633b04861db8c123f1ef4ed5ca7956e" gracePeriod=600 Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.984209 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="14b54bea8aef2a4d2be9cd9e83bb2d4c9633b04861db8c123f1ef4ed5ca7956e" exitCode=0 Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.984275 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"14b54bea8aef2a4d2be9cd9e83bb2d4c9633b04861db8c123f1ef4ed5ca7956e"} Nov 28 18:02:21 crc kubenswrapper[4884]: I1128 18:02:21.984331 4884 scope.go:117] "RemoveContainer" containerID="6a33bc580ca08b26964e48f11a014660a101cba2d164ecb503653b0e6710e2fb" Nov 28 18:02:22 crc kubenswrapper[4884]: I1128 18:02:22.995544 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"} Nov 28 18:04:51 crc kubenswrapper[4884]: I1128 18:04:51.243385 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:04:51 crc kubenswrapper[4884]: I1128 18:04:51.244005 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:04:51 crc kubenswrapper[4884]: I1128 18:04:51.641917 4884 generic.go:334] "Generic (PLEG): container finished" podID="436513f6-b254-482c-96a2-12faf0ab7f10" containerID="1248db5d29825d1a357d656e4ffed69408ecba84fb1bf8e64d1f93ef468f8424" exitCode=0 Nov 28 18:04:51 crc kubenswrapper[4884]: I1128 18:04:51.641972 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" event={"ID":"436513f6-b254-482c-96a2-12faf0ab7f10","Type":"ContainerDied","Data":"1248db5d29825d1a357d656e4ffed69408ecba84fb1bf8e64d1f93ef468f8424"} Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.171278 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.320949 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-inventory\") pod \"436513f6-b254-482c-96a2-12faf0ab7f10\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.320990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-combined-ca-bundle\") pod \"436513f6-b254-482c-96a2-12faf0ab7f10\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.321035 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ceph\") pod \"436513f6-b254-482c-96a2-12faf0ab7f10\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.321063 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ssh-key\") pod \"436513f6-b254-482c-96a2-12faf0ab7f10\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.321145 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-agent-neutron-config-0\") pod \"436513f6-b254-482c-96a2-12faf0ab7f10\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.321355 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhm6f\" (UniqueName: \"kubernetes.io/projected/436513f6-b254-482c-96a2-12faf0ab7f10-kube-api-access-nhm6f\") pod \"436513f6-b254-482c-96a2-12faf0ab7f10\" (UID: \"436513f6-b254-482c-96a2-12faf0ab7f10\") " Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.327101 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/436513f6-b254-482c-96a2-12faf0ab7f10-kube-api-access-nhm6f" (OuterVolumeSpecName: "kube-api-access-nhm6f") pod "436513f6-b254-482c-96a2-12faf0ab7f10" (UID: "436513f6-b254-482c-96a2-12faf0ab7f10"). InnerVolumeSpecName "kube-api-access-nhm6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.327127 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ceph" (OuterVolumeSpecName: "ceph") pod "436513f6-b254-482c-96a2-12faf0ab7f10" (UID: "436513f6-b254-482c-96a2-12faf0ab7f10"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.328640 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "436513f6-b254-482c-96a2-12faf0ab7f10" (UID: "436513f6-b254-482c-96a2-12faf0ab7f10"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.349433 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "436513f6-b254-482c-96a2-12faf0ab7f10" (UID: "436513f6-b254-482c-96a2-12faf0ab7f10"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.357915 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "436513f6-b254-482c-96a2-12faf0ab7f10" (UID: "436513f6-b254-482c-96a2-12faf0ab7f10"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.361354 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-inventory" (OuterVolumeSpecName: "inventory") pod "436513f6-b254-482c-96a2-12faf0ab7f10" (UID: "436513f6-b254-482c-96a2-12faf0ab7f10"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.423586 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhm6f\" (UniqueName: \"kubernetes.io/projected/436513f6-b254-482c-96a2-12faf0ab7f10-kube-api-access-nhm6f\") on node \"crc\" DevicePath \"\"" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.423631 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.423645 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.423658 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.423671 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.423688 4884 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/436513f6-b254-482c-96a2-12faf0ab7f10-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.665186 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" event={"ID":"436513f6-b254-482c-96a2-12faf0ab7f10","Type":"ContainerDied","Data":"a5fed306fd82f6cfa570c7aa08f2bec6633d71176e5c6431df9defbce5fe7948"} Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.665544 4884 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="a5fed306fd82f6cfa570c7aa08f2bec6633d71176e5c6431df9defbce5fe7948" Nov 28 18:04:53 crc kubenswrapper[4884]: I1128 18:04:53.665627 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-42kgt" Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.102714 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.103525 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="f93bec16-fe33-4ed0-b34d-0ba87456cb5f" containerName="nova-cell0-conductor-conductor" containerID="cri-o://f972c40085455d8433c3a7023300422f477d84bb2982dade567f0aec3613c9fa" gracePeriod=30 Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.139948 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.140263 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="6562624b-a1aa-4825-9f04-aaef8f125204" containerName="nova-cell1-conductor-conductor" containerID="cri-o://e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" gracePeriod=30 Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.831201 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.831888 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" containerName="nova-scheduler-scheduler" containerID="cri-o://4b0dfc644389b7e67d38b0db9eca1ace7fc7b6bb8212cb092d00f1b221f25d62" gracePeriod=30 Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.852058 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.853030 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-log" containerID="cri-o://6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b" gracePeriod=30 Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.853099 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-api" containerID="cri-o://5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332" gracePeriod=30 Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.899739 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.899972 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-log" containerID="cri-o://526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c" gracePeriod=30 Nov 28 18:05:04 crc kubenswrapper[4884]: I1128 18:05:04.900053 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-metadata" containerID="cri-o://589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb" gracePeriod=30 Nov 28 
18:05:05 crc kubenswrapper[4884]: I1128 18:05:05.840285 4884 generic.go:334] "Generic (PLEG): container finished" podID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerID="6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b" exitCode=143 Nov 28 18:05:05 crc kubenswrapper[4884]: I1128 18:05:05.840825 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"557350c4-25cf-4c90-aa9e-14f7e6623ebb","Type":"ContainerDied","Data":"6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b"} Nov 28 18:05:05 crc kubenswrapper[4884]: I1128 18:05:05.851845 4884 generic.go:334] "Generic (PLEG): container finished" podID="3af77193-c23c-451b-84b1-342985e2fe7e" containerID="526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c" exitCode=143 Nov 28 18:05:05 crc kubenswrapper[4884]: I1128 18:05:05.851932 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3af77193-c23c-451b-84b1-342985e2fe7e","Type":"ContainerDied","Data":"526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c"} Nov 28 18:05:05 crc kubenswrapper[4884]: I1128 18:05:05.854919 4884 generic.go:334] "Generic (PLEG): container finished" podID="7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" containerID="4b0dfc644389b7e67d38b0db9eca1ace7fc7b6bb8212cb092d00f1b221f25d62" exitCode=0 Nov 28 18:05:05 crc kubenswrapper[4884]: I1128 18:05:05.854952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860","Type":"ContainerDied","Data":"4b0dfc644389b7e67d38b0db9eca1ace7fc7b6bb8212cb092d00f1b221f25d62"} Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.091653 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.206276 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jchcp\" (UniqueName: \"kubernetes.io/projected/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-kube-api-access-jchcp\") pod \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.206377 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-config-data\") pod \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.206400 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-combined-ca-bundle\") pod \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\" (UID: \"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860\") " Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.214223 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-kube-api-access-jchcp" (OuterVolumeSpecName: "kube-api-access-jchcp") pod "7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" (UID: "7c9041b2-31ca-4c8f-a9c6-f39fe85f1860"). InnerVolumeSpecName "kube-api-access-jchcp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.248067 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-config-data" (OuterVolumeSpecName: "config-data") pod "7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" (UID: "7c9041b2-31ca-4c8f-a9c6-f39fe85f1860"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.271908 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" (UID: "7c9041b2-31ca-4c8f-a9c6-f39fe85f1860"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.309618 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jchcp\" (UniqueName: \"kubernetes.io/projected/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-kube-api-access-jchcp\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.309658 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.309668 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.867528 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c9041b2-31ca-4c8f-a9c6-f39fe85f1860","Type":"ContainerDied","Data":"acfc28e925a1ea951b9a4a96f8c0dcfc77d21f106d9b720d49a1231b96a418eb"} Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.867585 4884 scope.go:117] "RemoveContainer" containerID="4b0dfc644389b7e67d38b0db9eca1ace7fc7b6bb8212cb092d00f1b221f25d62" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.867723 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.903908 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.926618 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.952378 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:05:06 crc kubenswrapper[4884]: E1128 18:05:06.952970 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="extract-utilities" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.952994 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="extract-utilities" Nov 28 18:05:06 crc kubenswrapper[4884]: E1128 18:05:06.953019 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="registry-server" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953028 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="registry-server" Nov 28 18:05:06 crc kubenswrapper[4884]: E1128 18:05:06.953052 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="extract-content" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953060 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="extract-content" Nov 28 18:05:06 crc kubenswrapper[4884]: E1128 18:05:06.953070 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" containerName="nova-scheduler-scheduler" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953076 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" containerName="nova-scheduler-scheduler" Nov 28 18:05:06 crc kubenswrapper[4884]: E1128 18:05:06.953117 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="436513f6-b254-482c-96a2-12faf0ab7f10" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953126 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="436513f6-b254-482c-96a2-12faf0ab7f10" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953386 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="436513f6-b254-482c-96a2-12faf0ab7f10" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953421 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" containerName="nova-scheduler-scheduler" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.953467 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddc71974-f417-4e2e-b187-9e8e9720e3f5" containerName="registry-server" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.954453 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.956487 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 18:05:06 crc kubenswrapper[4884]: I1128 18:05:06.961749 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.028226 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6665fd-4507-4d5c-88ec-678f989dd692-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.028302 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6665fd-4507-4d5c-88ec-678f989dd692-config-data\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.028478 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjjcm\" (UniqueName: \"kubernetes.io/projected/5b6665fd-4507-4d5c-88ec-678f989dd692-kube-api-access-zjjcm\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.130019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjjcm\" (UniqueName: \"kubernetes.io/projected/5b6665fd-4507-4d5c-88ec-678f989dd692-kube-api-access-zjjcm\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.130163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6665fd-4507-4d5c-88ec-678f989dd692-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.130193 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6665fd-4507-4d5c-88ec-678f989dd692-config-data\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.134375 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b6665fd-4507-4d5c-88ec-678f989dd692-config-data\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.139287 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b6665fd-4507-4d5c-88ec-678f989dd692-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.148018 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjjcm\" (UniqueName: 
\"kubernetes.io/projected/5b6665fd-4507-4d5c-88ec-678f989dd692-kube-api-access-zjjcm\") pod \"nova-scheduler-0\" (UID: \"5b6665fd-4507-4d5c-88ec-678f989dd692\") " pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.275846 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 18:05:07 crc kubenswrapper[4884]: E1128 18:05:07.594521 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 18:05:07 crc kubenswrapper[4884]: E1128 18:05:07.596306 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 18:05:07 crc kubenswrapper[4884]: E1128 18:05:07.597363 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 18:05:07 crc kubenswrapper[4884]: E1128 18:05:07.597392 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="6562624b-a1aa-4825-9f04-aaef8f125204" containerName="nova-cell1-conductor-conductor" Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.762884 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.879785 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5b6665fd-4507-4d5c-88ec-678f989dd692","Type":"ContainerStarted","Data":"7043550a020d640ad1b8d04695a3bdfd5f22b79a2d185584d33fefce867d308a"} Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.881594 4884 generic.go:334] "Generic (PLEG): container finished" podID="f93bec16-fe33-4ed0-b34d-0ba87456cb5f" containerID="f972c40085455d8433c3a7023300422f477d84bb2982dade567f0aec3613c9fa" exitCode=0 Nov 28 18:05:07 crc kubenswrapper[4884]: I1128 18:05:07.881654 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93bec16-fe33-4ed0-b34d-0ba87456cb5f","Type":"ContainerDied","Data":"f972c40085455d8433c3a7023300422f477d84bb2982dade567f0aec3613c9fa"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.056107 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.81:8775/\": read tcp 10.217.0.2:47630->10.217.1.81:8775: read: connection reset by peer" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.056181 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" 
containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.81:8775/\": read tcp 10.217.0.2:47616->10.217.1.81:8775: read: connection reset by peer" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.132449 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.156253 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-combined-ca-bundle\") pod \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.156373 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsms8\" (UniqueName: \"kubernetes.io/projected/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-kube-api-access-wsms8\") pod \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.156647 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-config-data\") pod \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\" (UID: \"f93bec16-fe33-4ed0-b34d-0ba87456cb5f\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.167381 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-kube-api-access-wsms8" (OuterVolumeSpecName: "kube-api-access-wsms8") pod "f93bec16-fe33-4ed0-b34d-0ba87456cb5f" (UID: "f93bec16-fe33-4ed0-b34d-0ba87456cb5f"). InnerVolumeSpecName "kube-api-access-wsms8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.201447 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-config-data" (OuterVolumeSpecName: "config-data") pod "f93bec16-fe33-4ed0-b34d-0ba87456cb5f" (UID: "f93bec16-fe33-4ed0-b34d-0ba87456cb5f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.203592 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f93bec16-fe33-4ed0-b34d-0ba87456cb5f" (UID: "f93bec16-fe33-4ed0-b34d-0ba87456cb5f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.261220 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.261260 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsms8\" (UniqueName: \"kubernetes.io/projected/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-kube-api-access-wsms8\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.261271 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f93bec16-fe33-4ed0-b34d-0ba87456cb5f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.589146 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.673700 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-config-data\") pod \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.673751 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-combined-ca-bundle\") pod \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.673786 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/557350c4-25cf-4c90-aa9e-14f7e6623ebb-logs\") pod \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.674077 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr2xz\" (UniqueName: \"kubernetes.io/projected/557350c4-25cf-4c90-aa9e-14f7e6623ebb-kube-api-access-wr2xz\") pod \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\" (UID: \"557350c4-25cf-4c90-aa9e-14f7e6623ebb\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.677638 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/557350c4-25cf-4c90-aa9e-14f7e6623ebb-logs" (OuterVolumeSpecName: "logs") pod "557350c4-25cf-4c90-aa9e-14f7e6623ebb" (UID: "557350c4-25cf-4c90-aa9e-14f7e6623ebb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.703865 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c9041b2-31ca-4c8f-a9c6-f39fe85f1860" path="/var/lib/kubelet/pods/7c9041b2-31ca-4c8f-a9c6-f39fe85f1860/volumes" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.706865 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "557350c4-25cf-4c90-aa9e-14f7e6623ebb" (UID: "557350c4-25cf-4c90-aa9e-14f7e6623ebb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.707777 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/557350c4-25cf-4c90-aa9e-14f7e6623ebb-kube-api-access-wr2xz" (OuterVolumeSpecName: "kube-api-access-wr2xz") pod "557350c4-25cf-4c90-aa9e-14f7e6623ebb" (UID: "557350c4-25cf-4c90-aa9e-14f7e6623ebb"). InnerVolumeSpecName "kube-api-access-wr2xz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.720371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-config-data" (OuterVolumeSpecName: "config-data") pod "557350c4-25cf-4c90-aa9e-14f7e6623ebb" (UID: "557350c4-25cf-4c90-aa9e-14f7e6623ebb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.733414 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.776462 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3af77193-c23c-451b-84b1-342985e2fe7e-logs\") pod \"3af77193-c23c-451b-84b1-342985e2fe7e\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.776525 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttrzj\" (UniqueName: \"kubernetes.io/projected/3af77193-c23c-451b-84b1-342985e2fe7e-kube-api-access-ttrzj\") pod \"3af77193-c23c-451b-84b1-342985e2fe7e\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.777133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-combined-ca-bundle\") pod \"3af77193-c23c-451b-84b1-342985e2fe7e\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.777338 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-config-data\") pod \"3af77193-c23c-451b-84b1-342985e2fe7e\" (UID: \"3af77193-c23c-451b-84b1-342985e2fe7e\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.777378 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3af77193-c23c-451b-84b1-342985e2fe7e-logs" (OuterVolumeSpecName: "logs") pod "3af77193-c23c-451b-84b1-342985e2fe7e" (UID: "3af77193-c23c-451b-84b1-342985e2fe7e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.798372 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3af77193-c23c-451b-84b1-342985e2fe7e-kube-api-access-ttrzj" (OuterVolumeSpecName: "kube-api-access-ttrzj") pod "3af77193-c23c-451b-84b1-342985e2fe7e" (UID: "3af77193-c23c-451b-84b1-342985e2fe7e"). InnerVolumeSpecName "kube-api-access-ttrzj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.798936 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.800031 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/557350c4-25cf-4c90-aa9e-14f7e6623ebb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.800058 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/557350c4-25cf-4c90-aa9e-14f7e6623ebb-logs\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.800070 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr2xz\" (UniqueName: \"kubernetes.io/projected/557350c4-25cf-4c90-aa9e-14f7e6623ebb-kube-api-access-wr2xz\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.800082 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3af77193-c23c-451b-84b1-342985e2fe7e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.800104 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttrzj\" (UniqueName: \"kubernetes.io/projected/3af77193-c23c-451b-84b1-342985e2fe7e-kube-api-access-ttrzj\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.809603 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3af77193-c23c-451b-84b1-342985e2fe7e" (UID: "3af77193-c23c-451b-84b1-342985e2fe7e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.834294 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-config-data" (OuterVolumeSpecName: "config-data") pod "3af77193-c23c-451b-84b1-342985e2fe7e" (UID: "3af77193-c23c-451b-84b1-342985e2fe7e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.860385 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.895244 4884 generic.go:334] "Generic (PLEG): container finished" podID="6562624b-a1aa-4825-9f04-aaef8f125204" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" exitCode=0 Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.895336 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.895988 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6562624b-a1aa-4825-9f04-aaef8f125204","Type":"ContainerDied","Data":"e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.896019 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6562624b-a1aa-4825-9f04-aaef8f125204","Type":"ContainerDied","Data":"142f8289bce49ef2e68029b468c804cadc28541531026971cac88384e4523e69"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.896047 4884 scope.go:117] "RemoveContainer" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.898386 4884 generic.go:334] "Generic (PLEG): container finished" podID="3af77193-c23c-451b-84b1-342985e2fe7e" containerID="589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb" exitCode=0 Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.898433 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3af77193-c23c-451b-84b1-342985e2fe7e","Type":"ContainerDied","Data":"589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.898454 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3af77193-c23c-451b-84b1-342985e2fe7e","Type":"ContainerDied","Data":"26fcec31ae02c03c343f534a7935a900909168052febce1926bf78bfdc0b1cf4"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.898512 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.900870 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-combined-ca-bundle\") pod \"6562624b-a1aa-4825-9f04-aaef8f125204\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.901045 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7t25m\" (UniqueName: \"kubernetes.io/projected/6562624b-a1aa-4825-9f04-aaef8f125204-kube-api-access-7t25m\") pod \"6562624b-a1aa-4825-9f04-aaef8f125204\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.901234 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-config-data\") pod \"6562624b-a1aa-4825-9f04-aaef8f125204\" (UID: \"6562624b-a1aa-4825-9f04-aaef8f125204\") " Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.902068 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.902622 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af77193-c23c-451b-84b1-342985e2fe7e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.902450 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"557350c4-25cf-4c90-aa9e-14f7e6623ebb","Type":"ContainerDied","Data":"5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.902514 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.902424 4884 generic.go:334] "Generic (PLEG): container finished" podID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerID="5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332" exitCode=0 Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.903452 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"557350c4-25cf-4c90-aa9e-14f7e6623ebb","Type":"ContainerDied","Data":"3993e347c1704a2bd15a1b21acaa3ae5c2f6566f44aebee39c7fca62ef2d3a8c"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.905596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5b6665fd-4507-4d5c-88ec-678f989dd692","Type":"ContainerStarted","Data":"4f4c73aff08efc214d7d7c8cd80309563e3d2b3dd5c406ea4f453573c592c040"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.906303 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6562624b-a1aa-4825-9f04-aaef8f125204-kube-api-access-7t25m" (OuterVolumeSpecName: "kube-api-access-7t25m") pod "6562624b-a1aa-4825-9f04-aaef8f125204" (UID: "6562624b-a1aa-4825-9f04-aaef8f125204"). InnerVolumeSpecName "kube-api-access-7t25m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.910694 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f93bec16-fe33-4ed0-b34d-0ba87456cb5f","Type":"ContainerDied","Data":"6b0d788b95fec155be60a647bb55e14f11f36233e077fe524318b5725944146d"} Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.910996 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.928781 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.928763049 podStartE2EDuration="2.928763049s" podCreationTimestamp="2025-11-28 18:05:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:05:08.9231402 +0000 UTC m=+9948.485924011" watchObservedRunningTime="2025-11-28 18:05:08.928763049 +0000 UTC m=+9948.491546850" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.931933 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6562624b-a1aa-4825-9f04-aaef8f125204" (UID: "6562624b-a1aa-4825-9f04-aaef8f125204"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.948372 4884 scope.go:117] "RemoveContainer" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" Nov 28 18:05:08 crc kubenswrapper[4884]: E1128 18:05:08.948941 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb\": container with ID starting with e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb not found: ID does not exist" containerID="e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.953565 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb"} err="failed to get container status \"e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb\": rpc error: code = NotFound desc = could not find container \"e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb\": container with ID starting with e74b550f02b8e4c218f08f08a75456d1adefb215ccaeb52eb988eea940d3e8fb not found: ID does not exist" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.953641 4884 scope.go:117] "RemoveContainer" containerID="589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.968895 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-config-data" (OuterVolumeSpecName: "config-data") pod "6562624b-a1aa-4825-9f04-aaef8f125204" (UID: "6562624b-a1aa-4825-9f04-aaef8f125204"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:05:08 crc kubenswrapper[4884]: I1128 18:05:08.989994 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.004654 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.004686 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t25m\" (UniqueName: \"kubernetes.io/projected/6562624b-a1aa-4825-9f04-aaef8f125204-kube-api-access-7t25m\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.004699 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6562624b-a1aa-4825-9f04-aaef8f125204-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.009916 4884 scope.go:117] "RemoveContainer" containerID="526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.018383 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.029388 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.041744 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.045908 4884 scope.go:117] "RemoveContainer" containerID="589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.046375 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb\": container with ID starting with 589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb not found: ID does not exist" containerID="589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.046410 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb"} err="failed to get container status \"589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb\": rpc error: code = NotFound desc = could not find container \"589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb\": container with ID starting with 589de17ba19429f4388b556edc58f5c74755325f789315623c59c37844a306cb not found: ID does not exist" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.046440 4884 scope.go:117] "RemoveContainer" containerID="526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.046723 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c\": container with ID starting with 526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c not found: ID does not exist" containerID="526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c" Nov 28 
18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.046752 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c"} err="failed to get container status \"526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c\": rpc error: code = NotFound desc = could not find container \"526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c\": container with ID starting with 526fa69676dfb792000242692cec3329ada33d4ef7ea0c5e38731c7783442c5c not found: ID does not exist" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.046772 4884 scope.go:117] "RemoveContainer" containerID="5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048423 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.048860 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-log" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048878 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-log" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.048889 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f93bec16-fe33-4ed0-b34d-0ba87456cb5f" containerName="nova-cell0-conductor-conductor" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048896 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f93bec16-fe33-4ed0-b34d-0ba87456cb5f" containerName="nova-cell0-conductor-conductor" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.048920 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6562624b-a1aa-4825-9f04-aaef8f125204" containerName="nova-cell1-conductor-conductor" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048927 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6562624b-a1aa-4825-9f04-aaef8f125204" containerName="nova-cell1-conductor-conductor" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.048943 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-metadata" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048949 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-metadata" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.048964 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-log" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048971 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-log" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.048985 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-api" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.048992 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-api" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.049532 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f93bec16-fe33-4ed0-b34d-0ba87456cb5f" 
containerName="nova-cell0-conductor-conductor" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.049543 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-metadata" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.049558 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" containerName="nova-metadata-log" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.049574 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6562624b-a1aa-4825-9f04-aaef8f125204" containerName="nova-cell1-conductor-conductor" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.049587 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-api" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.049597 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" containerName="nova-api-log" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.050410 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.054977 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.061161 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.071902 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.082057 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.086229 4884 scope.go:117] "RemoveContainer" containerID="6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.092227 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.094475 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.101813 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.102056 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.107772 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.109944 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.111066 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.116970 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.117147 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.117300 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhvp2\" (UniqueName: \"kubernetes.io/projected/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-kube-api-access-mhvp2\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.119166 4884 scope.go:117] "RemoveContainer" containerID="5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332" Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.120609 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332\": container with ID starting with 5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332 not found: ID does not exist" containerID="5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.120651 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332"} err="failed to get container status \"5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332\": rpc error: code = NotFound desc = could not find container \"5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332\": container with ID starting with 5bf88d93f4a6944e3df9de3b84df9838b2f8097df4954515c5cc0105624d0332 not found: ID does not exist" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.120679 4884 scope.go:117] "RemoveContainer" containerID="6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.121019 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: E1128 18:05:09.121868 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b\": container with ID starting with 6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b not found: ID does not exist" 
containerID="6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.121900 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b"} err="failed to get container status \"6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b\": rpc error: code = NotFound desc = could not find container \"6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b\": container with ID starting with 6075ce3ff0d87e7405b299bea07e91e1dfe101cddeaeb0cda6fbb15008701f1b not found: ID does not exist" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.121938 4884 scope.go:117] "RemoveContainer" containerID="f972c40085455d8433c3a7023300422f477d84bb2982dade567f0aec3613c9fa" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.219869 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b94860e-4dad-4539-9261-843db90fa876-config-data\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.219931 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b94860e-4dad-4539-9261-843db90fa876-logs\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbrmx\" (UniqueName: \"kubernetes.io/projected/1b761da9-cfa3-47d8-9738-c2dcb53921d1-kube-api-access-rbrmx\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220083 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b761da9-cfa3-47d8-9738-c2dcb53921d1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220173 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b761da9-cfa3-47d8-9738-c2dcb53921d1-config-data\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220257 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhvp2\" (UniqueName: \"kubernetes.io/projected/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-kube-api-access-mhvp2\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220284 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b761da9-cfa3-47d8-9738-c2dcb53921d1-logs\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b94860e-4dad-4539-9261-843db90fa876-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.220394 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6vdp\" (UniqueName: \"kubernetes.io/projected/7b94860e-4dad-4539-9261-843db90fa876-kube-api-access-g6vdp\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.224329 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.224497 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.240298 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhvp2\" (UniqueName: \"kubernetes.io/projected/bf2cb4fa-52dc-4714-8cf3-35be415c6f9f-kube-api-access-mhvp2\") pod \"nova-cell0-conductor-0\" (UID: \"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f\") " pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.249331 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.269595 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.295230 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.297065 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.299861 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.307131 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.321920 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b761da9-cfa3-47d8-9738-c2dcb53921d1-config-data\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.321983 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b761da9-cfa3-47d8-9738-c2dcb53921d1-logs\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b94860e-4dad-4539-9261-843db90fa876-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6vdp\" (UniqueName: \"kubernetes.io/projected/7b94860e-4dad-4539-9261-843db90fa876-kube-api-access-g6vdp\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322136 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b94860e-4dad-4539-9261-843db90fa876-config-data\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322162 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b94860e-4dad-4539-9261-843db90fa876-logs\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbrmx\" (UniqueName: \"kubernetes.io/projected/1b761da9-cfa3-47d8-9738-c2dcb53921d1-kube-api-access-rbrmx\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322279 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b761da9-cfa3-47d8-9738-c2dcb53921d1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322689 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b761da9-cfa3-47d8-9738-c2dcb53921d1-logs\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " 
pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.322963 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b94860e-4dad-4539-9261-843db90fa876-logs\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.327964 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b761da9-cfa3-47d8-9738-c2dcb53921d1-config-data\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.327981 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b761da9-cfa3-47d8-9738-c2dcb53921d1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.327992 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b94860e-4dad-4539-9261-843db90fa876-config-data\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.329964 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b94860e-4dad-4539-9261-843db90fa876-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.340361 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6vdp\" (UniqueName: \"kubernetes.io/projected/7b94860e-4dad-4539-9261-843db90fa876-kube-api-access-g6vdp\") pod \"nova-metadata-0\" (UID: \"7b94860e-4dad-4539-9261-843db90fa876\") " pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.340762 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbrmx\" (UniqueName: \"kubernetes.io/projected/1b761da9-cfa3-47d8-9738-c2dcb53921d1-kube-api-access-rbrmx\") pod \"nova-api-0\" (UID: \"1b761da9-cfa3-47d8-9738-c2dcb53921d1\") " pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.382431 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.423886 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c9b274-5227-489e-bb7b-9b9469cf35c8-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.423939 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c9b274-5227-489e-bb7b-9b9469cf35c8-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.423995 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcl6n\" (UniqueName: \"kubernetes.io/projected/88c9b274-5227-489e-bb7b-9b9469cf35c8-kube-api-access-jcl6n\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.431339 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.526511 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c9b274-5227-489e-bb7b-9b9469cf35c8-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.526764 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c9b274-5227-489e-bb7b-9b9469cf35c8-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.526844 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcl6n\" (UniqueName: \"kubernetes.io/projected/88c9b274-5227-489e-bb7b-9b9469cf35c8-kube-api-access-jcl6n\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.531690 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88c9b274-5227-489e-bb7b-9b9469cf35c8-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.531918 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88c9b274-5227-489e-bb7b-9b9469cf35c8-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.552957 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcl6n\" (UniqueName: 
\"kubernetes.io/projected/88c9b274-5227-489e-bb7b-9b9469cf35c8-kube-api-access-jcl6n\") pod \"nova-cell1-conductor-0\" (UID: \"88c9b274-5227-489e-bb7b-9b9469cf35c8\") " pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.629337 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.771456 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.910550 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 18:05:09 crc kubenswrapper[4884]: W1128 18:05:09.917687 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf2cb4fa_52dc_4714_8cf3_35be415c6f9f.slice/crio-0516a072d18cd336a8b8421c1a84bae17f0e49aea92aad4587b30516e49d6aaf WatchSource:0}: Error finding container 0516a072d18cd336a8b8421c1a84bae17f0e49aea92aad4587b30516e49d6aaf: Status 404 returned error can't find the container with id 0516a072d18cd336a8b8421c1a84bae17f0e49aea92aad4587b30516e49d6aaf Nov 28 18:05:09 crc kubenswrapper[4884]: I1128 18:05:09.986281 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.168471 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 18:05:10 crc kubenswrapper[4884]: W1128 18:05:10.175943 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b761da9_cfa3_47d8_9738_c2dcb53921d1.slice/crio-53dda9204f9216147a46c344fc1f12a15f3cfe3bebe1bcea7d763460cf923de9 WatchSource:0}: Error finding container 53dda9204f9216147a46c344fc1f12a15f3cfe3bebe1bcea7d763460cf923de9: Status 404 returned error can't find the container with id 53dda9204f9216147a46c344fc1f12a15f3cfe3bebe1bcea7d763460cf923de9 Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.264666 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 18:05:10 crc kubenswrapper[4884]: W1128 18:05:10.270080 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88c9b274_5227_489e_bb7b_9b9469cf35c8.slice/crio-a059a14482d8433b699e5475cb92d0783bea04061718e84355e48a176185b176 WatchSource:0}: Error finding container a059a14482d8433b699e5475cb92d0783bea04061718e84355e48a176185b176: Status 404 returned error can't find the container with id a059a14482d8433b699e5475cb92d0783bea04061718e84355e48a176185b176 Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.702368 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3af77193-c23c-451b-84b1-342985e2fe7e" path="/var/lib/kubelet/pods/3af77193-c23c-451b-84b1-342985e2fe7e/volumes" Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.703822 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="557350c4-25cf-4c90-aa9e-14f7e6623ebb" path="/var/lib/kubelet/pods/557350c4-25cf-4c90-aa9e-14f7e6623ebb/volumes" Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.707166 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6562624b-a1aa-4825-9f04-aaef8f125204" path="/var/lib/kubelet/pods/6562624b-a1aa-4825-9f04-aaef8f125204/volumes" Nov 28 18:05:10 
crc kubenswrapper[4884]: I1128 18:05:10.708800 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f93bec16-fe33-4ed0-b34d-0ba87456cb5f" path="/var/lib/kubelet/pods/f93bec16-fe33-4ed0-b34d-0ba87456cb5f/volumes" Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.945259 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7b94860e-4dad-4539-9261-843db90fa876","Type":"ContainerStarted","Data":"b4d60fef8e1c19558f76dc848fb1b6dce569d5323332ded4ae9b30024df6df4d"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.945315 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7b94860e-4dad-4539-9261-843db90fa876","Type":"ContainerStarted","Data":"27818c7befe4615f0233ac0a1985b7f1d0b9b0c11c8522d6631827e614e48e63"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.945330 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7b94860e-4dad-4539-9261-843db90fa876","Type":"ContainerStarted","Data":"9afbe41757127a8b883e83357cfc92f26e914412267189f8107ecd204760ee6a"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.947616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"88c9b274-5227-489e-bb7b-9b9469cf35c8","Type":"ContainerStarted","Data":"b7989a893cd4864632da1e7635264afee97c434c6cffe8a9fb0f79bc5070c85a"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.947659 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"88c9b274-5227-489e-bb7b-9b9469cf35c8","Type":"ContainerStarted","Data":"a059a14482d8433b699e5475cb92d0783bea04061718e84355e48a176185b176"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.947747 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.949258 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f","Type":"ContainerStarted","Data":"74cf91c2056a26057c5d5cdec19bd14259deefed757d1879fde5887d65a89a47"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.949293 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bf2cb4fa-52dc-4714-8cf3-35be415c6f9f","Type":"ContainerStarted","Data":"0516a072d18cd336a8b8421c1a84bae17f0e49aea92aad4587b30516e49d6aaf"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.949699 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.955025 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b761da9-cfa3-47d8-9738-c2dcb53921d1","Type":"ContainerStarted","Data":"3e4910d958b8c072981bedf64cfea334d3eafb035b0724d3beb693992b41a854"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.955082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b761da9-cfa3-47d8-9738-c2dcb53921d1","Type":"ContainerStarted","Data":"b6486ee64ab383d9c8085377f722fe97def3ad401d661afa40b66e84a12fa978"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.955121 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"1b761da9-cfa3-47d8-9738-c2dcb53921d1","Type":"ContainerStarted","Data":"53dda9204f9216147a46c344fc1f12a15f3cfe3bebe1bcea7d763460cf923de9"} Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.978222 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.9782025279999997 podStartE2EDuration="2.978202528s" podCreationTimestamp="2025-11-28 18:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:05:10.960350196 +0000 UTC m=+9950.523134007" watchObservedRunningTime="2025-11-28 18:05:10.978202528 +0000 UTC m=+9950.540986329" Nov 28 18:05:10 crc kubenswrapper[4884]: I1128 18:05:10.985475 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.985455567 podStartE2EDuration="1.985455567s" podCreationTimestamp="2025-11-28 18:05:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:05:10.983199542 +0000 UTC m=+9950.545983353" watchObservedRunningTime="2025-11-28 18:05:10.985455567 +0000 UTC m=+9950.548239368" Nov 28 18:05:11 crc kubenswrapper[4884]: I1128 18:05:11.023188 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.023172199 podStartE2EDuration="3.023172199s" podCreationTimestamp="2025-11-28 18:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:05:11.015764596 +0000 UTC m=+9950.578548407" watchObservedRunningTime="2025-11-28 18:05:11.023172199 +0000 UTC m=+9950.585955990" Nov 28 18:05:11 crc kubenswrapper[4884]: I1128 18:05:11.043757 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.043739278 podStartE2EDuration="3.043739278s" podCreationTimestamp="2025-11-28 18:05:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 18:05:11.038621291 +0000 UTC m=+9950.601405092" watchObservedRunningTime="2025-11-28 18:05:11.043739278 +0000 UTC m=+9950.606523079" Nov 28 18:05:12 crc kubenswrapper[4884]: I1128 18:05:12.276977 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 18:05:14 crc kubenswrapper[4884]: I1128 18:05:14.432385 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 18:05:14 crc kubenswrapper[4884]: I1128 18:05:14.432884 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 18:05:17 crc kubenswrapper[4884]: I1128 18:05:17.276401 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 18:05:17 crc kubenswrapper[4884]: I1128 18:05:17.327460 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 18:05:18 crc kubenswrapper[4884]: I1128 18:05:18.120766 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 18:05:19 crc kubenswrapper[4884]: I1128 18:05:19.432584 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/nova-metadata-0" Nov 28 18:05:19 crc kubenswrapper[4884]: I1128 18:05:19.432637 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 18:05:19 crc kubenswrapper[4884]: I1128 18:05:19.587454 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 18:05:19 crc kubenswrapper[4884]: I1128 18:05:19.629891 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 18:05:19 crc kubenswrapper[4884]: I1128 18:05:19.629963 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 18:05:19 crc kubenswrapper[4884]: I1128 18:05:19.825485 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 18:05:20 crc kubenswrapper[4884]: I1128 18:05:20.516329 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7b94860e-4dad-4539-9261-843db90fa876" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.197:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:05:20 crc kubenswrapper[4884]: I1128 18:05:20.516672 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7b94860e-4dad-4539-9261-843db90fa876" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.197:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:05:20 crc kubenswrapper[4884]: I1128 18:05:20.714383 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1b761da9-cfa3-47d8-9738-c2dcb53921d1" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:05:20 crc kubenswrapper[4884]: I1128 18:05:20.715918 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1b761da9-cfa3-47d8-9738-c2dcb53921d1" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 18:05:21 crc kubenswrapper[4884]: I1128 18:05:21.243688 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:05:21 crc kubenswrapper[4884]: I1128 18:05:21.243777 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.622915 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rsg6g"] Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.627798 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.658127 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqxbf\" (UniqueName: \"kubernetes.io/projected/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-kube-api-access-gqxbf\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.658192 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-utilities\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.658378 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-catalog-content\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.665145 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rsg6g"] Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.761030 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-catalog-content\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.761297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqxbf\" (UniqueName: \"kubernetes.io/projected/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-kube-api-access-gqxbf\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.761332 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-utilities\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.761589 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-catalog-content\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.761825 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-utilities\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.790745 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gqxbf\" (UniqueName: \"kubernetes.io/projected/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-kube-api-access-gqxbf\") pod \"community-operators-rsg6g\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") " pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:23 crc kubenswrapper[4884]: I1128 18:05:23.964175 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsg6g" Nov 28 18:05:24 crc kubenswrapper[4884]: I1128 18:05:24.548567 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rsg6g"] Nov 28 18:05:24 crc kubenswrapper[4884]: W1128 18:05:24.549892 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c657bf9_ca6d_4260_bc7f_f9c382c6dc99.slice/crio-f7d114d792bce4a78129e9d2ac054f93a072806941dbb9b5f42f55a7426eafc0 WatchSource:0}: Error finding container f7d114d792bce4a78129e9d2ac054f93a072806941dbb9b5f42f55a7426eafc0: Status 404 returned error can't find the container with id f7d114d792bce4a78129e9d2ac054f93a072806941dbb9b5f42f55a7426eafc0 Nov 28 18:05:24 crc kubenswrapper[4884]: E1128 18:05:24.991902 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c657bf9_ca6d_4260_bc7f_f9c382c6dc99.slice/crio-8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3c657bf9_ca6d_4260_bc7f_f9c382c6dc99.slice/crio-conmon-8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1.scope\": RecentStats: unable to find data in memory cache]" Nov 28 18:05:25 crc kubenswrapper[4884]: I1128 18:05:25.178687 4884 generic.go:334] "Generic (PLEG): container finished" podID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerID="8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1" exitCode=0 Nov 28 18:05:25 crc kubenswrapper[4884]: I1128 18:05:25.178784 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerDied","Data":"8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1"} Nov 28 18:05:25 crc kubenswrapper[4884]: I1128 18:05:25.178855 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerStarted","Data":"f7d114d792bce4a78129e9d2ac054f93a072806941dbb9b5f42f55a7426eafc0"} Nov 28 18:05:27 crc kubenswrapper[4884]: I1128 18:05:27.204258 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerStarted","Data":"e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8"} Nov 28 18:05:28 crc kubenswrapper[4884]: I1128 18:05:28.225223 4884 generic.go:334] "Generic (PLEG): container finished" podID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerID="e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8" exitCode=0 Nov 28 18:05:28 crc kubenswrapper[4884]: I1128 18:05:28.225301 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" 
event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerDied","Data":"e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8"}
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.243961 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerStarted","Data":"50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58"}
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.283695 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rsg6g" podStartSLOduration=2.548572205 podStartE2EDuration="6.28367009s" podCreationTimestamp="2025-11-28 18:05:23 +0000 UTC" firstStartedPulling="2025-11-28 18:05:25.182168397 +0000 UTC m=+9964.744952188" lastFinishedPulling="2025-11-28 18:05:28.917266232 +0000 UTC m=+9968.480050073" observedRunningTime="2025-11-28 18:05:29.269580021 +0000 UTC m=+9968.832363852" watchObservedRunningTime="2025-11-28 18:05:29.28367009 +0000 UTC m=+9968.846453931"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.434498 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.434953 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.438221 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.633792 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.633983 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.634226 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.634250 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.638078 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 18:05:29 crc kubenswrapper[4884]: I1128 18:05:29.639586 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 18:05:30 crc kubenswrapper[4884]: I1128 18:05:30.257017 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.230057 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"]
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.231732 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.233764 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.234136 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4wkxj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.234436 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.234557 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.234571 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.235362 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.236129 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.253820 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.253877 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.253929 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.253974 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxfgt\" (UniqueName: \"kubernetes.io/projected/09df7760-4d98-47f8-b867-6b19765ca19f-kube-api-access-gxfgt\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254016 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254035 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254059 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254080 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254112 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254150 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.254208 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.261821 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"]
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355646 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355735 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxfgt\" (UniqueName: \"kubernetes.io/projected/09df7760-4d98-47f8-b867-6b19765ca19f-kube-api-access-gxfgt\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355808 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355836 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355868 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355894 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355918 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.355958 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.356042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.356080 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.356141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.361832 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.362649 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.363211 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.363675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.367420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.378287 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.379460 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.379803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.380937 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.384780 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.389938 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxfgt\" (UniqueName: \"kubernetes.io/projected/09df7760-4d98-47f8-b867-6b19765ca19f-kube-api-access-gxfgt\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:31 crc kubenswrapper[4884]: I1128 18:05:31.554365 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:05:32 crc kubenswrapper[4884]: I1128 18:05:32.198581 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"]
Nov 28 18:05:32 crc kubenswrapper[4884]: W1128 18:05:32.202293 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09df7760_4d98_47f8_b867_6b19765ca19f.slice/crio-4e848f6df903daa0368ba25bd5ff1c011fcdf0fbea0932f9908f35f408fd6e5b WatchSource:0}: Error finding container 4e848f6df903daa0368ba25bd5ff1c011fcdf0fbea0932f9908f35f408fd6e5b: Status 404 returned error can't find the container with id 4e848f6df903daa0368ba25bd5ff1c011fcdf0fbea0932f9908f35f408fd6e5b
Nov 28 18:05:32 crc kubenswrapper[4884]: I1128 18:05:32.288123 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj" event={"ID":"09df7760-4d98-47f8-b867-6b19765ca19f","Type":"ContainerStarted","Data":"4e848f6df903daa0368ba25bd5ff1c011fcdf0fbea0932f9908f35f408fd6e5b"}
Nov 28 18:05:33 crc kubenswrapper[4884]: I1128 18:05:33.964391 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rsg6g"
Nov 28 18:05:33 crc kubenswrapper[4884]: I1128 18:05:33.964714 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rsg6g"
Nov 28 18:05:34 crc kubenswrapper[4884]: I1128 18:05:34.040259 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rsg6g"
Nov 28 18:05:34 crc kubenswrapper[4884]: I1128 18:05:34.318493 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj" event={"ID":"09df7760-4d98-47f8-b867-6b19765ca19f","Type":"ContainerStarted","Data":"7a2304d1e841232cfd91d109489cd2f2f6688dd3a41bbcc82e76dfc6e4610283"}
Nov 28 18:05:34 crc kubenswrapper[4884]: I1128 18:05:34.344330 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj" podStartSLOduration=2.630118806 podStartE2EDuration="3.34431135s" podCreationTimestamp="2025-11-28 18:05:31 +0000 UTC" firstStartedPulling="2025-11-28 18:05:32.205980824 +0000 UTC m=+9971.768764635" lastFinishedPulling="2025-11-28 18:05:32.920173378 +0000 UTC m=+9972.482957179" observedRunningTime="2025-11-28 18:05:34.335656347 +0000 UTC m=+9973.898440198" watchObservedRunningTime="2025-11-28 18:05:34.34431135 +0000 UTC m=+9973.907095151"
Nov 28 18:05:34 crc kubenswrapper[4884]: I1128 18:05:34.395880 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rsg6g"
Nov 28 18:05:34 crc kubenswrapper[4884]: I1128 18:05:34.463077 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rsg6g"]
Nov 28 18:05:36 crc kubenswrapper[4884]: I1128 18:05:36.341680 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rsg6g" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="registry-server" containerID="cri-o://50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58" gracePeriod=2
Nov 28 18:05:36 crc kubenswrapper[4884]: I1128 18:05:36.948523 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsg6g"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.007007 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-catalog-content\") pod \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") "
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.007407 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-utilities\") pod \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") "
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.007682 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqxbf\" (UniqueName: \"kubernetes.io/projected/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-kube-api-access-gqxbf\") pod \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\" (UID: \"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99\") "
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.008502 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-utilities" (OuterVolumeSpecName: "utilities") pod "3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" (UID: "3c657bf9-ca6d-4260-bc7f-f9c382c6dc99"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.025470 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-kube-api-access-gqxbf" (OuterVolumeSpecName: "kube-api-access-gqxbf") pod "3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" (UID: "3c657bf9-ca6d-4260-bc7f-f9c382c6dc99"). InnerVolumeSpecName "kube-api-access-gqxbf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.056363 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" (UID: "3c657bf9-ca6d-4260-bc7f-f9c382c6dc99"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.111696 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqxbf\" (UniqueName: \"kubernetes.io/projected/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-kube-api-access-gqxbf\") on node \"crc\" DevicePath \"\""
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.111765 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.111794 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.359124 4884 generic.go:334] "Generic (PLEG): container finished" podID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerID="50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58" exitCode=0
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.359186 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsg6g"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.359200 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerDied","Data":"50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58"}
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.359298 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsg6g" event={"ID":"3c657bf9-ca6d-4260-bc7f-f9c382c6dc99","Type":"ContainerDied","Data":"f7d114d792bce4a78129e9d2ac054f93a072806941dbb9b5f42f55a7426eafc0"}
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.359343 4884 scope.go:117] "RemoveContainer" containerID="50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.420401 4884 scope.go:117] "RemoveContainer" containerID="e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.423435 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rsg6g"]
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.443421 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rsg6g"]
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.460908 4884 scope.go:117] "RemoveContainer" containerID="8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.511374 4884 scope.go:117] "RemoveContainer" containerID="50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58"
Nov 28 18:05:37 crc kubenswrapper[4884]: E1128 18:05:37.512060 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58\": container with ID starting with 50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58 not found: ID does not exist" containerID="50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.512133 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58"} err="failed to get container status \"50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58\": rpc error: code = NotFound desc = could not find container \"50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58\": container with ID starting with 50c3cc0594578b096bb0b89c64fd1a6539a242298dbb6311efdfaeff6c005f58 not found: ID does not exist"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.512167 4884 scope.go:117] "RemoveContainer" containerID="e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8"
Nov 28 18:05:37 crc kubenswrapper[4884]: E1128 18:05:37.512629 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8\": container with ID starting with e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8 not found: ID does not exist" containerID="e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.512701 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8"} err="failed to get container status \"e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8\": rpc error: code = NotFound desc = could not find container \"e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8\": container with ID starting with e9c7b04bf2c7bd6928ea4ddbc7db17dfa45c772e6786abe209e67dedf2bab0a8 not found: ID does not exist"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.512739 4884 scope.go:117] "RemoveContainer" containerID="8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1"
Nov 28 18:05:37 crc kubenswrapper[4884]: E1128 18:05:37.513157 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1\": container with ID starting with 8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1 not found: ID does not exist" containerID="8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1"
Nov 28 18:05:37 crc kubenswrapper[4884]: I1128 18:05:37.513194 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1"} err="failed to get container status \"8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1\": rpc error: code = NotFound desc = could not find container \"8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1\": container with ID starting with 8e5b67ce9c0d176e21a084a20b29beae494896fcce54c09363577e137535f9c1 not found: ID does not exist"
Nov 28 18:05:38 crc kubenswrapper[4884]: I1128 18:05:38.700239 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" path="/var/lib/kubelet/pods/3c657bf9-ca6d-4260-bc7f-f9c382c6dc99/volumes"
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.242722 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.243634 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.243712 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp"
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.245153 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.245278 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" gracePeriod=600
Nov 28 18:05:51 crc kubenswrapper[4884]: E1128 18:05:51.376946 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.552704 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" exitCode=0
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.552761 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"}
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.552823 4884 scope.go:117] "RemoveContainer" containerID="14b54bea8aef2a4d2be9cd9e83bb2d4c9633b04861db8c123f1ef4ed5ca7956e"
Nov 28 18:05:51 crc kubenswrapper[4884]: I1128 18:05:51.553735 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:05:51 crc kubenswrapper[4884]: E1128 18:05:51.554309 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:06:02 crc kubenswrapper[4884]: I1128 18:06:02.688643 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:06:02 crc kubenswrapper[4884]: E1128 18:06:02.689983 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:06:14 crc kubenswrapper[4884]: I1128 18:06:14.689603 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:06:14 crc kubenswrapper[4884]: E1128 18:06:14.690611 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:06:25 crc kubenswrapper[4884]: I1128 18:06:25.688732 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:06:25 crc kubenswrapper[4884]: E1128 18:06:25.689783 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:06:36 crc kubenswrapper[4884]: I1128 18:06:36.691646 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:06:36 crc kubenswrapper[4884]: E1128 18:06:36.693250 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:06:48 crc kubenswrapper[4884]: I1128 18:06:48.689285 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:06:48 crc kubenswrapper[4884]: E1128 18:06:48.690516 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:07:03 crc kubenswrapper[4884]: I1128 18:07:03.690606 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:07:03 crc kubenswrapper[4884]: E1128 18:07:03.691840 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:07:15 crc kubenswrapper[4884]: I1128 18:07:15.688602 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:07:15 crc kubenswrapper[4884]: E1128 18:07:15.689590 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:07:27 crc kubenswrapper[4884]: I1128 18:07:27.690003 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:07:27 crc kubenswrapper[4884]: E1128 18:07:27.691534 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:07:40 crc kubenswrapper[4884]: I1128 18:07:40.701261 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:07:40 crc kubenswrapper[4884]: E1128 18:07:40.702039 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:07:52 crc kubenswrapper[4884]: I1128 18:07:52.689746 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:07:52 crc kubenswrapper[4884]: E1128 18:07:52.691195 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:08:03 crc kubenswrapper[4884]: I1128 18:08:03.688409 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:08:03 crc kubenswrapper[4884]: E1128 18:08:03.689460 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:08:16 crc kubenswrapper[4884]: I1128 18:08:16.688760 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:08:16 crc kubenswrapper[4884]: E1128 18:08:16.689713 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8"
Nov 28 18:08:23 crc kubenswrapper[4884]: I1128 18:08:23.317859 4884 generic.go:334] "Generic (PLEG): container finished" podID="09df7760-4d98-47f8-b867-6b19765ca19f" containerID="7a2304d1e841232cfd91d109489cd2f2f6688dd3a41bbcc82e76dfc6e4610283" exitCode=0
Nov 28 18:08:23 crc kubenswrapper[4884]: I1128 18:08:23.317954 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj" event={"ID":"09df7760-4d98-47f8-b867-6b19765ca19f","Type":"ContainerDied","Data":"7a2304d1e841232cfd91d109489cd2f2f6688dd3a41bbcc82e76dfc6e4610283"}
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.482655 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mg5bp"]
Nov 28 18:08:24 crc kubenswrapper[4884]: E1128 18:08:24.483408 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="extract-utilities"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.483420 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="extract-utilities"
Nov 28 18:08:24 crc kubenswrapper[4884]: E1128 18:08:24.483436 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="registry-server"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.483442 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="registry-server"
Nov 28 18:08:24 crc kubenswrapper[4884]: E1128 18:08:24.483483 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="extract-content"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.483489 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="extract-content"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.483695 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c657bf9-ca6d-4260-bc7f-f9c382c6dc99" containerName="registry-server"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.489609 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.493831 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mg5bp"]
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.604773 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-utilities\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.605804 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcq8p\" (UniqueName: \"kubernetes.io/projected/0d76edff-97fe-46d3-b284-583ba19c76fe-kube-api-access-mcq8p\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.605964 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-catalog-content\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.707889 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-utilities\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.708335 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcq8p\" (UniqueName: \"kubernetes.io/projected/0d76edff-97fe-46d3-b284-583ba19c76fe-kube-api-access-mcq8p\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.708480 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-catalog-content\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.709060 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-catalog-content\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.709380 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-utilities\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.750310 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcq8p\" (UniqueName: \"kubernetes.io/projected/0d76edff-97fe-46d3-b284-583ba19c76fe-kube-api-access-mcq8p\") pod \"redhat-marketplace-mg5bp\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.814034 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mg5bp"
Nov 28 18:08:24 crc kubenswrapper[4884]: I1128 18:08:24.935622 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014209 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-0\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014277 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-1\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-0\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ssh-key\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014370 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxfgt\" (UniqueName: \"kubernetes.io/projected/09df7760-4d98-47f8-b867-6b19765ca19f-kube-api-access-gxfgt\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014390 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-1\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014442 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-1\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014485 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-inventory\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014531 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ceph\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014589 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-combined-ca-bundle\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.014656 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-0\") pod \"09df7760-4d98-47f8-b867-6b19765ca19f\" (UID: \"09df7760-4d98-47f8-b867-6b19765ca19f\") "
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.033815 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09df7760-4d98-47f8-b867-6b19765ca19f-kube-api-access-gxfgt" (OuterVolumeSpecName: "kube-api-access-gxfgt") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "kube-api-access-gxfgt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.041276 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.056252 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ceph" (OuterVolumeSpecName: "ceph") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.064200 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.066965 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.074626 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.075766 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-cells-global-config-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.085302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-inventory" (OuterVolumeSpecName: "inventory") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.092827 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.100323 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.109400 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "09df7760-4d98-47f8-b867-6b19765ca19f" (UID: "09df7760-4d98-47f8-b867-6b19765ca19f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117489 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117529 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117543 4884 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117556 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117566 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117575 4884 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117583 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117592 4884 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117601 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117613 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxfgt\" (UniqueName: \"kubernetes.io/projected/09df7760-4d98-47f8-b867-6b19765ca19f-kube-api-access-gxfgt\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.117627 4884 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/09df7760-4d98-47f8-b867-6b19765ca19f-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.319454 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mg5bp"]
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.420629 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj" event={"ID":"09df7760-4d98-47f8-b867-6b19765ca19f","Type":"ContainerDied","Data":"4e848f6df903daa0368ba25bd5ff1c011fcdf0fbea0932f9908f35f408fd6e5b"}
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.421025 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e848f6df903daa0368ba25bd5ff1c011fcdf0fbea0932f9908f35f408fd6e5b"
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.420950 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj"
Nov 28 18:08:25 crc kubenswrapper[4884]: I1128 18:08:25.427395 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mg5bp" event={"ID":"0d76edff-97fe-46d3-b284-583ba19c76fe","Type":"ContainerStarted","Data":"4a7e93404ca80fa8d8cb1a0b09b2cb4a882ec198fc6518bd9f21296f77a1725d"}
Nov 28 18:08:26 crc kubenswrapper[4884]: I1128 18:08:26.438731 4884 generic.go:334] "Generic (PLEG): container finished" podID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerID="99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6" exitCode=0
Nov 28 18:08:26 crc kubenswrapper[4884]: I1128 18:08:26.438789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mg5bp" event={"ID":"0d76edff-97fe-46d3-b284-583ba19c76fe","Type":"ContainerDied","Data":"99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6"}
Nov 28 18:08:26 crc kubenswrapper[4884]: I1128 18:08:26.442427 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 18:08:28 crc kubenswrapper[4884]: I1128 18:08:28.468969 4884 generic.go:334] "Generic (PLEG): container finished" podID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerID="fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4" exitCode=0
Nov 28 18:08:28 crc kubenswrapper[4884]: I1128 18:08:28.469156 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mg5bp" event={"ID":"0d76edff-97fe-46d3-b284-583ba19c76fe","Type":"ContainerDied","Data":"fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4"}
Nov 28 18:08:30 crc kubenswrapper[4884]: I1128 18:08:30.497358 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mg5bp" event={"ID":"0d76edff-97fe-46d3-b284-583ba19c76fe","Type":"ContainerStarted","Data":"14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2"}
Nov 28 18:08:30 crc kubenswrapper[4884]: I1128 18:08:30.526350 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mg5bp" podStartSLOduration=4.079606316 podStartE2EDuration="6.526323414s" podCreationTimestamp="2025-11-28 18:08:24 +0000 UTC" firstStartedPulling="2025-11-28 18:08:26.44218294 +0000 UTC m=+10146.004966741" lastFinishedPulling="2025-11-28 18:08:28.888900028 +0000 UTC m=+10148.451683839" observedRunningTime="2025-11-28 18:08:30.517031214 +0000 UTC m=+10150.079815005" watchObservedRunningTime="2025-11-28 18:08:30.526323414 +0000 UTC m=+10150.089107225"
Nov 28 18:08:31 crc kubenswrapper[4884]: I1128 18:08:31.690446 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d"
Nov 28 18:08:31 crc kubenswrapper[4884]: E1128 18:08:31.691208 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\"
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:08:34 crc kubenswrapper[4884]: I1128 18:08:34.814181 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mg5bp" Nov 28 18:08:34 crc kubenswrapper[4884]: I1128 18:08:34.814823 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mg5bp" Nov 28 18:08:34 crc kubenswrapper[4884]: I1128 18:08:34.863140 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mg5bp" Nov 28 18:08:35 crc kubenswrapper[4884]: I1128 18:08:35.655120 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mg5bp" Nov 28 18:08:35 crc kubenswrapper[4884]: I1128 18:08:35.738478 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mg5bp"] Nov 28 18:08:37 crc kubenswrapper[4884]: I1128 18:08:37.574320 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mg5bp" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="registry-server" containerID="cri-o://14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2" gracePeriod=2 Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.031492 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mg5bp" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.137726 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-utilities\") pod \"0d76edff-97fe-46d3-b284-583ba19c76fe\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.137846 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcq8p\" (UniqueName: \"kubernetes.io/projected/0d76edff-97fe-46d3-b284-583ba19c76fe-kube-api-access-mcq8p\") pod \"0d76edff-97fe-46d3-b284-583ba19c76fe\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.137992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-catalog-content\") pod \"0d76edff-97fe-46d3-b284-583ba19c76fe\" (UID: \"0d76edff-97fe-46d3-b284-583ba19c76fe\") " Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.138474 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-utilities" (OuterVolumeSpecName: "utilities") pod "0d76edff-97fe-46d3-b284-583ba19c76fe" (UID: "0d76edff-97fe-46d3-b284-583ba19c76fe"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.143054 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d76edff-97fe-46d3-b284-583ba19c76fe-kube-api-access-mcq8p" (OuterVolumeSpecName: "kube-api-access-mcq8p") pod "0d76edff-97fe-46d3-b284-583ba19c76fe" (UID: "0d76edff-97fe-46d3-b284-583ba19c76fe"). InnerVolumeSpecName "kube-api-access-mcq8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.164605 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0d76edff-97fe-46d3-b284-583ba19c76fe" (UID: "0d76edff-97fe-46d3-b284-583ba19c76fe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.240462 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcq8p\" (UniqueName: \"kubernetes.io/projected/0d76edff-97fe-46d3-b284-583ba19c76fe-kube-api-access-mcq8p\") on node \"crc\" DevicePath \"\"" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.240507 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.240522 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0d76edff-97fe-46d3-b284-583ba19c76fe-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.534867 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6sskw"] Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.535962 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09df7760-4d98-47f8-b867-6b19765ca19f" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.536179 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="09df7760-4d98-47f8-b867-6b19765ca19f" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.536271 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="extract-content" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.536350 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="extract-content" Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.536448 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="registry-server" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.536512 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="registry-server" Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.536607 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="extract-utilities" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.536689 4884 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="extract-utilities" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.537064 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="09df7760-4d98-47f8-b867-6b19765ca19f" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.537188 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerName="registry-server" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.539213 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.546519 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6sskw"] Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.599315 4884 generic.go:334] "Generic (PLEG): container finished" podID="0d76edff-97fe-46d3-b284-583ba19c76fe" containerID="14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2" exitCode=0 Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.599364 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mg5bp" event={"ID":"0d76edff-97fe-46d3-b284-583ba19c76fe","Type":"ContainerDied","Data":"14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2"} Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.599390 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mg5bp" event={"ID":"0d76edff-97fe-46d3-b284-583ba19c76fe","Type":"ContainerDied","Data":"4a7e93404ca80fa8d8cb1a0b09b2cb4a882ec198fc6518bd9f21296f77a1725d"} Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.599408 4884 scope.go:117] "RemoveContainer" containerID="14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.599523 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mg5bp" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.639741 4884 scope.go:117] "RemoveContainer" containerID="fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.651468 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-utilities\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.651689 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frc4r\" (UniqueName: \"kubernetes.io/projected/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-kube-api-access-frc4r\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.651753 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-catalog-content\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.655261 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mg5bp"] Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.666670 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mg5bp"] Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.677428 4884 scope.go:117] "RemoveContainer" containerID="99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.707268 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d76edff-97fe-46d3-b284-583ba19c76fe" path="/var/lib/kubelet/pods/0d76edff-97fe-46d3-b284-583ba19c76fe/volumes" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.721535 4884 scope.go:117] "RemoveContainer" containerID="14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2" Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.721947 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2\": container with ID starting with 14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2 not found: ID does not exist" containerID="14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.721981 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2"} err="failed to get container status \"14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2\": rpc error: code = NotFound desc = could not find container \"14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2\": container with ID starting with 14d03d66b28a1e50976699ef5bb14fdbf17c9e9838a184d2e2e316b71e4cf3c2 not found: ID does not exist" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 
18:08:38.722003 4884 scope.go:117] "RemoveContainer" containerID="fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4" Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.724039 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4\": container with ID starting with fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4 not found: ID does not exist" containerID="fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.724072 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4"} err="failed to get container status \"fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4\": rpc error: code = NotFound desc = could not find container \"fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4\": container with ID starting with fec244054b11f3b8fb7fc3b84b429752932dbc52354b76944eb87d2ebd66d3a4 not found: ID does not exist" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.724105 4884 scope.go:117] "RemoveContainer" containerID="99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6" Nov 28 18:08:38 crc kubenswrapper[4884]: E1128 18:08:38.725013 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6\": container with ID starting with 99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6 not found: ID does not exist" containerID="99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.725045 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6"} err="failed to get container status \"99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6\": rpc error: code = NotFound desc = could not find container \"99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6\": container with ID starting with 99986506e2466c23aacdd58d54dae64de68ee61029628503002a135153612ec6 not found: ID does not exist" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.753860 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frc4r\" (UniqueName: \"kubernetes.io/projected/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-kube-api-access-frc4r\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.753989 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-catalog-content\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.754117 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-utilities\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " 
pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.754498 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-catalog-content\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.754685 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-utilities\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.773703 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frc4r\" (UniqueName: \"kubernetes.io/projected/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-kube-api-access-frc4r\") pod \"redhat-operators-6sskw\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:38 crc kubenswrapper[4884]: I1128 18:08:38.871825 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:39 crc kubenswrapper[4884]: I1128 18:08:39.364777 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6sskw"] Nov 28 18:08:39 crc kubenswrapper[4884]: I1128 18:08:39.610469 4884 generic.go:334] "Generic (PLEG): container finished" podID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerID="8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff" exitCode=0 Nov 28 18:08:39 crc kubenswrapper[4884]: I1128 18:08:39.610571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerDied","Data":"8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff"} Nov 28 18:08:39 crc kubenswrapper[4884]: I1128 18:08:39.610785 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerStarted","Data":"3ab028b1ca1f47c42367fef160daf35c14243736873a6bb52f2691a6bbf003ae"} Nov 28 18:08:40 crc kubenswrapper[4884]: I1128 18:08:40.622819 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerStarted","Data":"079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796"} Nov 28 18:08:43 crc kubenswrapper[4884]: I1128 18:08:43.660244 4884 generic.go:334] "Generic (PLEG): container finished" podID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerID="079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796" exitCode=0 Nov 28 18:08:43 crc kubenswrapper[4884]: I1128 18:08:43.660307 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerDied","Data":"079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796"} Nov 28 18:08:44 crc kubenswrapper[4884]: I1128 18:08:44.679833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" 
event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerStarted","Data":"cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14"} Nov 28 18:08:44 crc kubenswrapper[4884]: I1128 18:08:44.700638 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6sskw" podStartSLOduration=2.186428685 podStartE2EDuration="6.700625419s" podCreationTimestamp="2025-11-28 18:08:38 +0000 UTC" firstStartedPulling="2025-11-28 18:08:39.612281973 +0000 UTC m=+10159.175065774" lastFinishedPulling="2025-11-28 18:08:44.126478707 +0000 UTC m=+10163.689262508" observedRunningTime="2025-11-28 18:08:44.700035655 +0000 UTC m=+10164.262819466" watchObservedRunningTime="2025-11-28 18:08:44.700625419 +0000 UTC m=+10164.263409220" Nov 28 18:08:45 crc kubenswrapper[4884]: I1128 18:08:45.688347 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:08:45 crc kubenswrapper[4884]: E1128 18:08:45.689187 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:08:48 crc kubenswrapper[4884]: I1128 18:08:48.873059 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:48 crc kubenswrapper[4884]: I1128 18:08:48.873436 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:49 crc kubenswrapper[4884]: I1128 18:08:49.952524 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6sskw" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="registry-server" probeResult="failure" output=< Nov 28 18:08:49 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 18:08:49 crc kubenswrapper[4884]: > Nov 28 18:08:57 crc kubenswrapper[4884]: I1128 18:08:57.688190 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:08:57 crc kubenswrapper[4884]: E1128 18:08:57.689205 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:08:58 crc kubenswrapper[4884]: I1128 18:08:58.951757 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:59 crc kubenswrapper[4884]: I1128 18:08:59.005762 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:08:59 crc kubenswrapper[4884]: I1128 18:08:59.189763 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6sskw"] Nov 28 18:09:00 crc kubenswrapper[4884]: I1128 18:09:00.857963 4884 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6sskw" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="registry-server" containerID="cri-o://cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14" gracePeriod=2 Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.367626 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.485816 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-catalog-content\") pod \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.486192 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-utilities\") pod \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.486305 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frc4r\" (UniqueName: \"kubernetes.io/projected/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-kube-api-access-frc4r\") pod \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\" (UID: \"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0\") " Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.487449 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-utilities" (OuterVolumeSpecName: "utilities") pod "9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" (UID: "9b2d411c-4766-4ebc-8f7e-1cd36e5569d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.492349 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-kube-api-access-frc4r" (OuterVolumeSpecName: "kube-api-access-frc4r") pod "9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" (UID: "9b2d411c-4766-4ebc-8f7e-1cd36e5569d0"). InnerVolumeSpecName "kube-api-access-frc4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.588418 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.588624 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frc4r\" (UniqueName: \"kubernetes.io/projected/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-kube-api-access-frc4r\") on node \"crc\" DevicePath \"\"" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.596632 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" (UID: "9b2d411c-4766-4ebc-8f7e-1cd36e5569d0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.689820 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.874616 4884 generic.go:334] "Generic (PLEG): container finished" podID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerID="cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14" exitCode=0 Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.874721 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6sskw" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.874699 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerDied","Data":"cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14"} Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.875231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6sskw" event={"ID":"9b2d411c-4766-4ebc-8f7e-1cd36e5569d0","Type":"ContainerDied","Data":"3ab028b1ca1f47c42367fef160daf35c14243736873a6bb52f2691a6bbf003ae"} Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.875262 4884 scope.go:117] "RemoveContainer" containerID="cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.911842 4884 scope.go:117] "RemoveContainer" containerID="079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796" Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.932668 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6sskw"] Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.945497 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6sskw"] Nov 28 18:09:01 crc kubenswrapper[4884]: I1128 18:09:01.949041 4884 scope.go:117] "RemoveContainer" containerID="8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff" Nov 28 18:09:02 crc kubenswrapper[4884]: I1128 18:09:02.036879 4884 scope.go:117] "RemoveContainer" containerID="cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14" Nov 28 18:09:02 crc kubenswrapper[4884]: E1128 18:09:02.038356 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14\": container with ID starting with cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14 not found: ID does not exist" containerID="cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14" Nov 28 18:09:02 crc kubenswrapper[4884]: I1128 18:09:02.038437 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14"} err="failed to get container status \"cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14\": rpc error: code = NotFound desc = could not find container \"cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14\": container with ID starting with cae74f6abb4050517ed1a99f2307a24a4d089662a9f9976b3443b21b9827db14 not found: ID does not exist" Nov 28 18:09:02 crc 
kubenswrapper[4884]: I1128 18:09:02.038494 4884 scope.go:117] "RemoveContainer" containerID="079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796" Nov 28 18:09:02 crc kubenswrapper[4884]: E1128 18:09:02.038988 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796\": container with ID starting with 079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796 not found: ID does not exist" containerID="079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796" Nov 28 18:09:02 crc kubenswrapper[4884]: I1128 18:09:02.039049 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796"} err="failed to get container status \"079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796\": rpc error: code = NotFound desc = could not find container \"079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796\": container with ID starting with 079f64f9fe2e57f7b8a2a8565881708c3346a41b76605b087aea888d2c50e796 not found: ID does not exist" Nov 28 18:09:02 crc kubenswrapper[4884]: I1128 18:09:02.039117 4884 scope.go:117] "RemoveContainer" containerID="8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff" Nov 28 18:09:02 crc kubenswrapper[4884]: E1128 18:09:02.039807 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff\": container with ID starting with 8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff not found: ID does not exist" containerID="8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff" Nov 28 18:09:02 crc kubenswrapper[4884]: I1128 18:09:02.039951 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff"} err="failed to get container status \"8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff\": rpc error: code = NotFound desc = could not find container \"8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff\": container with ID starting with 8887d6ddc46fe74f061fbc752462e68cd0dceab918f8ebcdec564f6544e276ff not found: ID does not exist" Nov 28 18:09:02 crc kubenswrapper[4884]: I1128 18:09:02.705174 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" path="/var/lib/kubelet/pods/9b2d411c-4766-4ebc-8f7e-1cd36e5569d0/volumes" Nov 28 18:09:09 crc kubenswrapper[4884]: I1128 18:09:09.689233 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:09:09 crc kubenswrapper[4884]: E1128 18:09:09.690119 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:09:20 crc kubenswrapper[4884]: I1128 18:09:20.703117 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" 
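
The stretch above is a back-off loop: the kubelet retries "RemoveContainer"/"StartContainer" for machine-config-daemon-pwcbp roughly every 11-15 seconds and logs the same pod_workers.go:1301 CrashLoopBackOff error each time, until the 5m back-off expires below (the 18:10:57 retry is followed by ContainerStarted at 18:10:58). When triaging a capture like this, tallying back-off entries per pod quickly isolates the noisy pod from the routine churn. A minimal sketch in Python, assuming the capture is saved as kubelet.log with one journald entry per line (the long wrapped lines in this archive are a storage artifact, not kubelet output); the script name and regex are illustrative, tied only to the entry format visible above:

    # backoff_tally.py -- tally CrashLoopBackOff "Error syncing pod" entries per pod.
    # Assumption: log saved as kubelet.log, one journald entry per line.
    import re
    from collections import Counter

    # Matches the pod="<namespace>/<name>" field that follows the CrashLoopBackOff
    # error string in the entries above (the unquoted pod=... inside the error
    # message itself is skipped because it lacks the opening quote).
    BACKOFF = re.compile(r'CrashLoopBackOff.*?\bpod="(?P<pod>[^"]+)"')

    counts = Counter()
    with open("kubelet.log", encoding="utf-8") as log:
        for line in log:
            match = BACKOFF.search(line)
            if match:
                counts[match.group("pod")] += 1

    for pod, hits in counts.most_common():
        print(f"{hits:5d}  {pod}")

Run against this capture, the only pod it should surface is openshift-machine-config-operator/machine-config-daemon-pwcbp.
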
Nov 28 18:09:20 crc kubenswrapper[4884]: E1128 18:09:20.704126 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:09:35 crc kubenswrapper[4884]: I1128 18:09:35.689514 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:09:35 crc kubenswrapper[4884]: E1128 18:09:35.690652 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:09:50 crc kubenswrapper[4884]: I1128 18:09:50.697339 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:09:50 crc kubenswrapper[4884]: E1128 18:09:50.698258 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:10:05 crc kubenswrapper[4884]: I1128 18:10:05.688278 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:10:05 crc kubenswrapper[4884]: E1128 18:10:05.689083 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:10:16 crc kubenswrapper[4884]: I1128 18:10:16.689698 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:10:16 crc kubenswrapper[4884]: E1128 18:10:16.690800 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:10:30 crc kubenswrapper[4884]: I1128 18:10:30.703386 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:10:30 crc kubenswrapper[4884]: E1128 18:10:30.704699 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:10:43 crc kubenswrapper[4884]: I1128 18:10:43.689135 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:10:43 crc kubenswrapper[4884]: E1128 18:10:43.690384 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:10:57 crc kubenswrapper[4884]: I1128 18:10:57.688998 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:10:58 crc kubenswrapper[4884]: I1128 18:10:58.291965 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"9c7f9c5b7102416ef4bc39b5e89536f417a9cacb447866f7fcbf32207fa17c78"} Nov 28 18:11:26 crc kubenswrapper[4884]: I1128 18:11:26.313126 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 18:11:26 crc kubenswrapper[4884]: I1128 18:11:26.314809 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-copy-data" podUID="2c879f6b-8b91-4d6a-b465-c82e9cec3f92" containerName="adoption" containerID="cri-o://a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68" gracePeriod=30 Nov 28 18:11:56 crc kubenswrapper[4884]: I1128 18:11:56.836386 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 18:11:56 crc kubenswrapper[4884]: I1128 18:11:56.919650 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-972x6\" (UniqueName: \"kubernetes.io/projected/2c879f6b-8b91-4d6a-b465-c82e9cec3f92-kube-api-access-972x6\") pod \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " Nov 28 18:11:56 crc kubenswrapper[4884]: I1128 18:11:56.920218 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mariadb-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") pod \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\" (UID: \"2c879f6b-8b91-4d6a-b465-c82e9cec3f92\") " Nov 28 18:11:56 crc kubenswrapper[4884]: I1128 18:11:56.925617 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c879f6b-8b91-4d6a-b465-c82e9cec3f92-kube-api-access-972x6" (OuterVolumeSpecName: "kube-api-access-972x6") pod "2c879f6b-8b91-4d6a-b465-c82e9cec3f92" (UID: "2c879f6b-8b91-4d6a-b465-c82e9cec3f92"). InnerVolumeSpecName "kube-api-access-972x6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:11:56 crc kubenswrapper[4884]: I1128 18:11:56.943864 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165" (OuterVolumeSpecName: "mariadb-data") pod "2c879f6b-8b91-4d6a-b465-c82e9cec3f92" (UID: "2c879f6b-8b91-4d6a-b465-c82e9cec3f92"). InnerVolumeSpecName "pvc-b087e5a7-c3f2-4673-9358-4508aad2b165". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.024377 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") on node \"crc\" " Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.024431 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-972x6\" (UniqueName: \"kubernetes.io/projected/2c879f6b-8b91-4d6a-b465-c82e9cec3f92-kube-api-access-972x6\") on node \"crc\" DevicePath \"\"" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.056686 4884 generic.go:334] "Generic (PLEG): container finished" podID="2c879f6b-8b91-4d6a-b465-c82e9cec3f92" containerID="a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68" exitCode=137 Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.056730 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"2c879f6b-8b91-4d6a-b465-c82e9cec3f92","Type":"ContainerDied","Data":"a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68"} Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.056756 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"2c879f6b-8b91-4d6a-b465-c82e9cec3f92","Type":"ContainerDied","Data":"e17b1ae10bdb41ad9d1526cb207afcfe9a1f2b17b12fe32b8ceaf91a8f3feeb9"} Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.056759 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.056772 4884 scope.go:117] "RemoveContainer" containerID="a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.092744 4884 scope.go:117] "RemoveContainer" containerID="a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68" Nov 28 18:11:57 crc kubenswrapper[4884]: E1128 18:11:57.097864 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68\": container with ID starting with a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68 not found: ID does not exist" containerID="a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.097928 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68"} err="failed to get container status \"a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68\": rpc error: code = NotFound desc = could not find container \"a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68\": container with ID starting with a9e33f0244d9d6f55a9c0755fcbb80d34534862656cb0bc8fb72f5a28be36d68 not found: ID does not exist" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.113157 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.122246 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.663795 4884 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.664205 4884 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b087e5a7-c3f2-4673-9358-4508aad2b165" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165") on node "crc" Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.712765 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.713007 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-copy-data" podUID="f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" containerName="adoption" containerID="cri-o://27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb" gracePeriod=30 Nov 28 18:11:57 crc kubenswrapper[4884]: I1128 18:11:57.746820 4884 reconciler_common.go:293] "Volume detached for volume \"pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b087e5a7-c3f2-4673-9358-4508aad2b165\") on node \"crc\" DevicePath \"\"" Nov 28 18:11:58 crc kubenswrapper[4884]: I1128 18:11:58.706503 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c879f6b-8b91-4d6a-b465-c82e9cec3f92" path="/var/lib/kubelet/pods/2c879f6b-8b91-4d6a-b465-c82e9cec3f92/volumes" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.406935 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.519930 4884 generic.go:334] "Generic (PLEG): container finished" podID="f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" containerID="27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb" exitCode=137 Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.519988 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff","Type":"ContainerDied","Data":"27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb"} Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.520025 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff","Type":"ContainerDied","Data":"c839eb4eac4babf27d9ec7b3786f17b896839bb683f15a029a7b8607413fe0c9"} Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.520045 4884 scope.go:117] "RemoveContainer" containerID="27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.520267 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.541255 4884 scope.go:117] "RemoveContainer" containerID="27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb" Nov 28 18:12:28 crc kubenswrapper[4884]: E1128 18:12:28.541689 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb\": container with ID starting with 27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb not found: ID does not exist" containerID="27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.541743 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb"} err="failed to get container status \"27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb\": rpc error: code = NotFound desc = could not find container \"27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb\": container with ID starting with 27f121d438b2abddb0485681a9cef29fee8b76e7a32611fa760e2f9518320cbb not found: ID does not exist" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.607273 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnf98\" (UniqueName: \"kubernetes.io/projected/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-kube-api-access-gnf98\") pod \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.607389 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-ovn-data-cert\") pod \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\" (UID: \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.608207 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") pod \"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\" (UID: 
\"f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff\") " Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.615459 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-ovn-data-cert" (OuterVolumeSpecName: "ovn-data-cert") pod "f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" (UID: "f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff"). InnerVolumeSpecName "ovn-data-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.615866 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-kube-api-access-gnf98" (OuterVolumeSpecName: "kube-api-access-gnf98") pod "f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" (UID: "f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff"). InnerVolumeSpecName "kube-api-access-gnf98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.632617 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9" (OuterVolumeSpecName: "ovn-data") pod "f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" (UID: "f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff"). InnerVolumeSpecName "pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.711422 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnf98\" (UniqueName: \"kubernetes.io/projected/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-kube-api-access-gnf98\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.712251 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff-ovn-data-cert\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.712797 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") on node \"crc\" " Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.750036 4884 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.750205 4884 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9") on node "crc" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.815001 4884 reconciler_common.go:293] "Volume detached for volume \"pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-bc66fca3-ecbd-4dc2-bce1-56a71c9c97e9\") on node \"crc\" DevicePath \"\"" Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.847017 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 18:12:28 crc kubenswrapper[4884]: I1128 18:12:28.860562 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 18:12:30 crc kubenswrapper[4884]: I1128 18:12:30.702608 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" path="/var/lib/kubelet/pods/f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff/volumes" Nov 28 18:13:21 crc kubenswrapper[4884]: I1128 18:13:21.243739 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:13:21 crc kubenswrapper[4884]: I1128 18:13:21.244465 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.463488 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dktpw"] Nov 28 18:13:35 crc kubenswrapper[4884]: E1128 18:13:35.467719 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" containerName="adoption" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.467760 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" containerName="adoption" Nov 28 18:13:35 crc kubenswrapper[4884]: E1128 18:13:35.467802 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="registry-server" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.467814 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="registry-server" Nov 28 18:13:35 crc kubenswrapper[4884]: E1128 18:13:35.467834 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="extract-utilities" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.467845 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="extract-utilities" Nov 28 18:13:35 crc kubenswrapper[4884]: E1128 18:13:35.467871 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c879f6b-8b91-4d6a-b465-c82e9cec3f92" containerName="adoption" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.467882 4884 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="2c879f6b-8b91-4d6a-b465-c82e9cec3f92" containerName="adoption" Nov 28 18:13:35 crc kubenswrapper[4884]: E1128 18:13:35.467907 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="extract-content" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.467917 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="extract-content" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.468312 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c879f6b-8b91-4d6a-b465-c82e9cec3f92" containerName="adoption" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.468348 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b2d411c-4766-4ebc-8f7e-1cd36e5569d0" containerName="registry-server" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.468369 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ad61a5-d96b-4f5e-b5d3-0b31631a65ff" containerName="adoption" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.471344 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.486155 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dktpw"] Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.595334 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-catalog-content\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.595602 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-utilities\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.595674 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56fq5\" (UniqueName: \"kubernetes.io/projected/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-kube-api-access-56fq5\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.698733 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-catalog-content\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.698825 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-utilities\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.698957 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56fq5\" (UniqueName: \"kubernetes.io/projected/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-kube-api-access-56fq5\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.699512 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-catalog-content\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.699556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-utilities\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.723675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56fq5\" (UniqueName: \"kubernetes.io/projected/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-kube-api-access-56fq5\") pod \"certified-operators-dktpw\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:35 crc kubenswrapper[4884]: I1128 18:13:35.822115 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.462175 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dktpw"] Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.479671 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-cdhk7/must-gather-b6mw5"] Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.485490 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.488454 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-cdhk7"/"kube-root-ca.crt" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.488606 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-cdhk7"/"openshift-service-ca.crt" Nov 28 18:13:36 crc kubenswrapper[4884]: W1128 18:13:36.492782 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda85b9fa2_6811_4b6b_8a60_f4a0aa530a6c.slice/crio-13ddc842c6405a722aa94650617c4b39db095c5e0e8c63c1f4e87ba4b3a437c0 WatchSource:0}: Error finding container 13ddc842c6405a722aa94650617c4b39db095c5e0e8c63c1f4e87ba4b3a437c0: Status 404 returned error can't find the container with id 13ddc842c6405a722aa94650617c4b39db095c5e0e8c63c1f4e87ba4b3a437c0 Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.513706 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-cdhk7/must-gather-b6mw5"] Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.631505 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxvhb\" (UniqueName: \"kubernetes.io/projected/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-kube-api-access-pxvhb\") pod \"must-gather-b6mw5\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.631559 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-must-gather-output\") pod \"must-gather-b6mw5\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.733297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxvhb\" (UniqueName: \"kubernetes.io/projected/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-kube-api-access-pxvhb\") pod \"must-gather-b6mw5\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.733363 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-must-gather-output\") pod \"must-gather-b6mw5\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.733850 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-must-gather-output\") pod \"must-gather-b6mw5\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.765351 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxvhb\" (UniqueName: \"kubernetes.io/projected/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-kube-api-access-pxvhb\") pod \"must-gather-b6mw5\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " 
pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:36 crc kubenswrapper[4884]: I1128 18:13:36.930428 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:13:37 crc kubenswrapper[4884]: I1128 18:13:37.390762 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-cdhk7/must-gather-b6mw5"] Nov 28 18:13:37 crc kubenswrapper[4884]: W1128 18:13:37.401785 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d7e07e4_8800_4d69_a6ee_b1be14c22cd3.slice/crio-b89342d9f03cafe4405cc2003449f1289f5c72235d0c2de582294190a5a908c6 WatchSource:0}: Error finding container b89342d9f03cafe4405cc2003449f1289f5c72235d0c2de582294190a5a908c6: Status 404 returned error can't find the container with id b89342d9f03cafe4405cc2003449f1289f5c72235d0c2de582294190a5a908c6 Nov 28 18:13:37 crc kubenswrapper[4884]: I1128 18:13:37.407472 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:13:37 crc kubenswrapper[4884]: I1128 18:13:37.429339 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" event={"ID":"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3","Type":"ContainerStarted","Data":"b89342d9f03cafe4405cc2003449f1289f5c72235d0c2de582294190a5a908c6"} Nov 28 18:13:37 crc kubenswrapper[4884]: I1128 18:13:37.430948 4884 generic.go:334] "Generic (PLEG): container finished" podID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerID="0af4c49e61ce0beb5667c31d1363664c00f62fe72c686b57105759b6fec614c0" exitCode=0 Nov 28 18:13:37 crc kubenswrapper[4884]: I1128 18:13:37.430984 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerDied","Data":"0af4c49e61ce0beb5667c31d1363664c00f62fe72c686b57105759b6fec614c0"} Nov 28 18:13:37 crc kubenswrapper[4884]: I1128 18:13:37.431004 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerStarted","Data":"13ddc842c6405a722aa94650617c4b39db095c5e0e8c63c1f4e87ba4b3a437c0"} Nov 28 18:13:38 crc kubenswrapper[4884]: I1128 18:13:38.444231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerStarted","Data":"9b72e4fae5fa3159bebf9022d202356f73b97571d510aa22479bedebeaa8eab4"} Nov 28 18:13:39 crc kubenswrapper[4884]: I1128 18:13:39.455495 4884 generic.go:334] "Generic (PLEG): container finished" podID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerID="9b72e4fae5fa3159bebf9022d202356f73b97571d510aa22479bedebeaa8eab4" exitCode=0 Nov 28 18:13:39 crc kubenswrapper[4884]: I1128 18:13:39.455568 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerDied","Data":"9b72e4fae5fa3159bebf9022d202356f73b97571d510aa22479bedebeaa8eab4"} Nov 28 18:13:43 crc kubenswrapper[4884]: I1128 18:13:43.515972 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" 
event={"ID":"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3","Type":"ContainerStarted","Data":"9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c"} Nov 28 18:13:43 crc kubenswrapper[4884]: I1128 18:13:43.516570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" event={"ID":"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3","Type":"ContainerStarted","Data":"ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18"} Nov 28 18:13:43 crc kubenswrapper[4884]: I1128 18:13:43.521495 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerStarted","Data":"84ff121909cbc073865a680ed9305dcd93983b4ee05540e5dcf7bca26da342a4"} Nov 28 18:13:43 crc kubenswrapper[4884]: I1128 18:13:43.547841 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" podStartSLOduration=2.141075983 podStartE2EDuration="7.547820716s" podCreationTimestamp="2025-11-28 18:13:36 +0000 UTC" firstStartedPulling="2025-11-28 18:13:37.407413049 +0000 UTC m=+10456.970196860" lastFinishedPulling="2025-11-28 18:13:42.814157772 +0000 UTC m=+10462.376941593" observedRunningTime="2025-11-28 18:13:43.532557973 +0000 UTC m=+10463.095341774" watchObservedRunningTime="2025-11-28 18:13:43.547820716 +0000 UTC m=+10463.110604527" Nov 28 18:13:43 crc kubenswrapper[4884]: I1128 18:13:43.553018 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dktpw" podStartSLOduration=3.202514972 podStartE2EDuration="8.553001842s" podCreationTimestamp="2025-11-28 18:13:35 +0000 UTC" firstStartedPulling="2025-11-28 18:13:37.432512062 +0000 UTC m=+10456.995295863" lastFinishedPulling="2025-11-28 18:13:42.782998912 +0000 UTC m=+10462.345782733" observedRunningTime="2025-11-28 18:13:43.55169254 +0000 UTC m=+10463.114476351" watchObservedRunningTime="2025-11-28 18:13:43.553001842 +0000 UTC m=+10463.115785643" Nov 28 18:13:45 crc kubenswrapper[4884]: I1128 18:13:45.822363 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:45 crc kubenswrapper[4884]: I1128 18:13:45.822889 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:45 crc kubenswrapper[4884]: I1128 18:13:45.880285 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.535010 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-cdhk7/crc-debug-99nqf"] Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.537466 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.539982 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-cdhk7"/"default-dockercfg-cqndj" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.683726 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rr7kq\" (UniqueName: \"kubernetes.io/projected/dda5c4b9-a0b0-407a-b84e-787a71803328-kube-api-access-rr7kq\") pod \"crc-debug-99nqf\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.684542 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dda5c4b9-a0b0-407a-b84e-787a71803328-host\") pod \"crc-debug-99nqf\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.787212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rr7kq\" (UniqueName: \"kubernetes.io/projected/dda5c4b9-a0b0-407a-b84e-787a71803328-kube-api-access-rr7kq\") pod \"crc-debug-99nqf\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.787650 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dda5c4b9-a0b0-407a-b84e-787a71803328-host\") pod \"crc-debug-99nqf\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.787793 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dda5c4b9-a0b0-407a-b84e-787a71803328-host\") pod \"crc-debug-99nqf\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.808226 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rr7kq\" (UniqueName: \"kubernetes.io/projected/dda5c4b9-a0b0-407a-b84e-787a71803328-kube-api-access-rr7kq\") pod \"crc-debug-99nqf\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: I1128 18:13:47.858323 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:13:47 crc kubenswrapper[4884]: W1128 18:13:47.891251 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddda5c4b9_a0b0_407a_b84e_787a71803328.slice/crio-93544851a423dd42f1c074fc03201c93703747e345881b79888d85e09c8af975 WatchSource:0}: Error finding container 93544851a423dd42f1c074fc03201c93703747e345881b79888d85e09c8af975: Status 404 returned error can't find the container with id 93544851a423dd42f1c074fc03201c93703747e345881b79888d85e09c8af975 Nov 28 18:13:48 crc kubenswrapper[4884]: I1128 18:13:48.583544 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" event={"ID":"dda5c4b9-a0b0-407a-b84e-787a71803328","Type":"ContainerStarted","Data":"93544851a423dd42f1c074fc03201c93703747e345881b79888d85e09c8af975"} Nov 28 18:13:51 crc kubenswrapper[4884]: I1128 18:13:51.242671 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:13:51 crc kubenswrapper[4884]: I1128 18:13:51.242962 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:13:55 crc kubenswrapper[4884]: I1128 18:13:55.887049 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:13:55 crc kubenswrapper[4884]: I1128 18:13:55.940731 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dktpw"] Nov 28 18:13:56 crc kubenswrapper[4884]: I1128 18:13:56.660889 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dktpw" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="registry-server" containerID="cri-o://84ff121909cbc073865a680ed9305dcd93983b4ee05540e5dcf7bca26da342a4" gracePeriod=2 Nov 28 18:13:57 crc kubenswrapper[4884]: I1128 18:13:57.681684 4884 generic.go:334] "Generic (PLEG): container finished" podID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerID="84ff121909cbc073865a680ed9305dcd93983b4ee05540e5dcf7bca26da342a4" exitCode=0 Nov 28 18:13:57 crc kubenswrapper[4884]: I1128 18:13:57.681892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerDied","Data":"84ff121909cbc073865a680ed9305dcd93983b4ee05540e5dcf7bca26da342a4"} Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.320474 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.360330 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56fq5\" (UniqueName: \"kubernetes.io/projected/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-kube-api-access-56fq5\") pod \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.360830 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-utilities\") pod \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.360927 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-catalog-content\") pod \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\" (UID: \"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c\") " Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.362541 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-utilities" (OuterVolumeSpecName: "utilities") pod "a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" (UID: "a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.370905 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-kube-api-access-56fq5" (OuterVolumeSpecName: "kube-api-access-56fq5") pod "a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" (UID: "a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c"). InnerVolumeSpecName "kube-api-access-56fq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.407533 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" (UID: "a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.463041 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.463360 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.463375 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56fq5\" (UniqueName: \"kubernetes.io/projected/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c-kube-api-access-56fq5\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.762726 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dktpw" event={"ID":"a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c","Type":"ContainerDied","Data":"13ddc842c6405a722aa94650617c4b39db095c5e0e8c63c1f4e87ba4b3a437c0"} Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.762785 4884 scope.go:117] "RemoveContainer" containerID="84ff121909cbc073865a680ed9305dcd93983b4ee05540e5dcf7bca26da342a4" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.762743 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dktpw" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.768959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" event={"ID":"dda5c4b9-a0b0-407a-b84e-787a71803328","Type":"ContainerStarted","Data":"4556c1721ed63927b86efb06f8434277b9f280971651e9abc54f04e09f111b86"} Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.795781 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" podStartSLOduration=1.703289407 podStartE2EDuration="13.795761441s" podCreationTimestamp="2025-11-28 18:13:47 +0000 UTC" firstStartedPulling="2025-11-28 18:13:47.896824376 +0000 UTC m=+10467.459608177" lastFinishedPulling="2025-11-28 18:13:59.98929641 +0000 UTC m=+10479.552080211" observedRunningTime="2025-11-28 18:14:00.78834563 +0000 UTC m=+10480.351129441" watchObservedRunningTime="2025-11-28 18:14:00.795761441 +0000 UTC m=+10480.358545242" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.813764 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dktpw"] Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.814258 4884 scope.go:117] "RemoveContainer" containerID="9b72e4fae5fa3159bebf9022d202356f73b97571d510aa22479bedebeaa8eab4" Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.822229 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dktpw"] Nov 28 18:14:00 crc kubenswrapper[4884]: I1128 18:14:00.847277 4884 scope.go:117] "RemoveContainer" containerID="0af4c49e61ce0beb5667c31d1363664c00f62fe72c686b57105759b6fec614c0" Nov 28 18:14:02 crc kubenswrapper[4884]: I1128 18:14:02.730231 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" path="/var/lib/kubelet/pods/a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c/volumes" Nov 28 18:14:16 crc kubenswrapper[4884]: I1128 18:14:16.927262 4884 
generic.go:334] "Generic (PLEG): container finished" podID="dda5c4b9-a0b0-407a-b84e-787a71803328" containerID="4556c1721ed63927b86efb06f8434277b9f280971651e9abc54f04e09f111b86" exitCode=0 Nov 28 18:14:16 crc kubenswrapper[4884]: I1128 18:14:16.927348 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" event={"ID":"dda5c4b9-a0b0-407a-b84e-787a71803328","Type":"ContainerDied","Data":"4556c1721ed63927b86efb06f8434277b9f280971651e9abc54f04e09f111b86"} Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.074848 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.114055 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-cdhk7/crc-debug-99nqf"] Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.123205 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-cdhk7/crc-debug-99nqf"] Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.136569 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rr7kq\" (UniqueName: \"kubernetes.io/projected/dda5c4b9-a0b0-407a-b84e-787a71803328-kube-api-access-rr7kq\") pod \"dda5c4b9-a0b0-407a-b84e-787a71803328\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.137130 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dda5c4b9-a0b0-407a-b84e-787a71803328-host\") pod \"dda5c4b9-a0b0-407a-b84e-787a71803328\" (UID: \"dda5c4b9-a0b0-407a-b84e-787a71803328\") " Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.137235 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dda5c4b9-a0b0-407a-b84e-787a71803328-host" (OuterVolumeSpecName: "host") pod "dda5c4b9-a0b0-407a-b84e-787a71803328" (UID: "dda5c4b9-a0b0-407a-b84e-787a71803328"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.137944 4884 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dda5c4b9-a0b0-407a-b84e-787a71803328-host\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.143408 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dda5c4b9-a0b0-407a-b84e-787a71803328-kube-api-access-rr7kq" (OuterVolumeSpecName: "kube-api-access-rr7kq") pod "dda5c4b9-a0b0-407a-b84e-787a71803328" (UID: "dda5c4b9-a0b0-407a-b84e-787a71803328"). InnerVolumeSpecName "kube-api-access-rr7kq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.239961 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rr7kq\" (UniqueName: \"kubernetes.io/projected/dda5c4b9-a0b0-407a-b84e-787a71803328-kube-api-access-rr7kq\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.707164 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dda5c4b9-a0b0-407a-b84e-787a71803328" path="/var/lib/kubelet/pods/dda5c4b9-a0b0-407a-b84e-787a71803328/volumes" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.949752 4884 scope.go:117] "RemoveContainer" containerID="4556c1721ed63927b86efb06f8434277b9f280971651e9abc54f04e09f111b86" Nov 28 18:14:18 crc kubenswrapper[4884]: I1128 18:14:18.949889 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-99nqf" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.350135 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-cdhk7/crc-debug-vctx7"] Nov 28 18:14:19 crc kubenswrapper[4884]: E1128 18:14:19.350663 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="extract-utilities" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.350680 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="extract-utilities" Nov 28 18:14:19 crc kubenswrapper[4884]: E1128 18:14:19.350700 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="extract-content" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.350709 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="extract-content" Nov 28 18:14:19 crc kubenswrapper[4884]: E1128 18:14:19.350742 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="registry-server" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.350751 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="registry-server" Nov 28 18:14:19 crc kubenswrapper[4884]: E1128 18:14:19.350784 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dda5c4b9-a0b0-407a-b84e-787a71803328" containerName="container-00" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.350792 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="dda5c4b9-a0b0-407a-b84e-787a71803328" containerName="container-00" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.351026 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="dda5c4b9-a0b0-407a-b84e-787a71803328" containerName="container-00" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.351054 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a85b9fa2-6811-4b6b-8a60-f4a0aa530a6c" containerName="registry-server" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.352031 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.354265 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-cdhk7"/"default-dockercfg-cqndj" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.465586 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-host\") pod \"crc-debug-vctx7\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.465980 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x6xj\" (UniqueName: \"kubernetes.io/projected/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-kube-api-access-9x6xj\") pod \"crc-debug-vctx7\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.568133 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x6xj\" (UniqueName: \"kubernetes.io/projected/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-kube-api-access-9x6xj\") pod \"crc-debug-vctx7\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.568252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-host\") pod \"crc-debug-vctx7\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.568316 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-host\") pod \"crc-debug-vctx7\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.585662 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x6xj\" (UniqueName: \"kubernetes.io/projected/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-kube-api-access-9x6xj\") pod \"crc-debug-vctx7\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.669391 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:19 crc kubenswrapper[4884]: W1128 18:14:19.711288 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca0ee3b1_79c2_4d2a_b648_099fedcbab0b.slice/crio-3ab3d8db128fda615d31576dc909fbccaf16f06d1b058bf4608dcee559f3ed19 WatchSource:0}: Error finding container 3ab3d8db128fda615d31576dc909fbccaf16f06d1b058bf4608dcee559f3ed19: Status 404 returned error can't find the container with id 3ab3d8db128fda615d31576dc909fbccaf16f06d1b058bf4608dcee559f3ed19 Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.961466 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" event={"ID":"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b","Type":"ContainerStarted","Data":"103ed07beab9d76c36381ecd143b76f1bd7967f798c1f34f2cba25d220248f12"} Nov 28 18:14:19 crc kubenswrapper[4884]: I1128 18:14:19.961780 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" event={"ID":"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b","Type":"ContainerStarted","Data":"3ab3d8db128fda615d31576dc909fbccaf16f06d1b058bf4608dcee559f3ed19"} Nov 28 18:14:20 crc kubenswrapper[4884]: I1128 18:14:20.983985 4884 generic.go:334] "Generic (PLEG): container finished" podID="ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" containerID="103ed07beab9d76c36381ecd143b76f1bd7967f798c1f34f2cba25d220248f12" exitCode=1 Nov 28 18:14:20 crc kubenswrapper[4884]: I1128 18:14:20.984282 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" event={"ID":"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b","Type":"ContainerDied","Data":"103ed07beab9d76c36381ecd143b76f1bd7967f798c1f34f2cba25d220248f12"} Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.039651 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-cdhk7/crc-debug-vctx7"] Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.063793 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-cdhk7/crc-debug-vctx7"] Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.243180 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.243494 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.243600 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.244494 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9c7f9c5b7102416ef4bc39b5e89536f417a9cacb447866f7fcbf32207fa17c78"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" 
Nov 28 18:14:21 crc kubenswrapper[4884]: I1128 18:14:21.244633 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://9c7f9c5b7102416ef4bc39b5e89536f417a9cacb447866f7fcbf32207fa17c78" gracePeriod=600 Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.008973 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="9c7f9c5b7102416ef4bc39b5e89536f417a9cacb447866f7fcbf32207fa17c78" exitCode=0 Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.009123 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"9c7f9c5b7102416ef4bc39b5e89536f417a9cacb447866f7fcbf32207fa17c78"} Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.009501 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3"} Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.009523 4884 scope.go:117] "RemoveContainer" containerID="392a1a7fdf657ad007f3f51e3acf1fe0ad3aa7489f387fea01e4a841048b754d" Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.124718 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.229309 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-host\") pod \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.229573 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x6xj\" (UniqueName: \"kubernetes.io/projected/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-kube-api-access-9x6xj\") pod \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\" (UID: \"ca0ee3b1-79c2-4d2a-b648-099fedcbab0b\") " Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.229675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-host" (OuterVolumeSpecName: "host") pod "ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" (UID: "ca0ee3b1-79c2-4d2a-b648-099fedcbab0b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.230117 4884 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-host\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.242781 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-kube-api-access-9x6xj" (OuterVolumeSpecName: "kube-api-access-9x6xj") pod "ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" (UID: "ca0ee3b1-79c2-4d2a-b648-099fedcbab0b"). InnerVolumeSpecName "kube-api-access-9x6xj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.332559 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x6xj\" (UniqueName: \"kubernetes.io/projected/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b-kube-api-access-9x6xj\") on node \"crc\" DevicePath \"\"" Nov 28 18:14:22 crc kubenswrapper[4884]: I1128 18:14:22.701737 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" path="/var/lib/kubelet/pods/ca0ee3b1-79c2-4d2a-b648-099fedcbab0b/volumes" Nov 28 18:14:23 crc kubenswrapper[4884]: I1128 18:14:23.020422 4884 scope.go:117] "RemoveContainer" containerID="103ed07beab9d76c36381ecd143b76f1bd7967f798c1f34f2cba25d220248f12" Nov 28 18:14:23 crc kubenswrapper[4884]: I1128 18:14:23.020421 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/crc-debug-vctx7" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.175857 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf"] Nov 28 18:15:00 crc kubenswrapper[4884]: E1128 18:15:00.177053 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" containerName="container-00" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.177072 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" containerName="container-00" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.177393 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca0ee3b1-79c2-4d2a-b648-099fedcbab0b" containerName="container-00" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.178251 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.180344 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.180413 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.190918 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf"] Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.296743 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b69ab527-c37e-4383-bad0-a9616447278d-secret-volume\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.296815 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b69ab527-c37e-4383-bad0-a9616447278d-config-volume\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.297154 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7k5k\" (UniqueName: \"kubernetes.io/projected/b69ab527-c37e-4383-bad0-a9616447278d-kube-api-access-w7k5k\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.402806 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7k5k\" (UniqueName: \"kubernetes.io/projected/b69ab527-c37e-4383-bad0-a9616447278d-kube-api-access-w7k5k\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.402956 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b69ab527-c37e-4383-bad0-a9616447278d-secret-volume\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.402995 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b69ab527-c37e-4383-bad0-a9616447278d-config-volume\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.403781 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b69ab527-c37e-4383-bad0-a9616447278d-config-volume\") pod 
\"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.413675 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b69ab527-c37e-4383-bad0-a9616447278d-secret-volume\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.446017 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7k5k\" (UniqueName: \"kubernetes.io/projected/b69ab527-c37e-4383-bad0-a9616447278d-kube-api-access-w7k5k\") pod \"collect-profiles-29405895-9zrhf\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.501346 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:00 crc kubenswrapper[4884]: I1128 18:15:00.968108 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf"] Nov 28 18:15:01 crc kubenswrapper[4884]: I1128 18:15:01.459242 4884 generic.go:334] "Generic (PLEG): container finished" podID="b69ab527-c37e-4383-bad0-a9616447278d" containerID="d7b716ebc5aff834d60a52cf7de77b0d199938b3f6f84279bec1459612df1bd4" exitCode=0 Nov 28 18:15:01 crc kubenswrapper[4884]: I1128 18:15:01.459342 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" event={"ID":"b69ab527-c37e-4383-bad0-a9616447278d","Type":"ContainerDied","Data":"d7b716ebc5aff834d60a52cf7de77b0d199938b3f6f84279bec1459612df1bd4"} Nov 28 18:15:01 crc kubenswrapper[4884]: I1128 18:15:01.459453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" event={"ID":"b69ab527-c37e-4383-bad0-a9616447278d","Type":"ContainerStarted","Data":"b2bf8a0e2b53202bfc9768c8904baf3c31200bed2401821f8ed8caf57d0d42aa"} Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.863799 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.960645 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7k5k\" (UniqueName: \"kubernetes.io/projected/b69ab527-c37e-4383-bad0-a9616447278d-kube-api-access-w7k5k\") pod \"b69ab527-c37e-4383-bad0-a9616447278d\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.960843 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b69ab527-c37e-4383-bad0-a9616447278d-config-volume\") pod \"b69ab527-c37e-4383-bad0-a9616447278d\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.960994 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b69ab527-c37e-4383-bad0-a9616447278d-secret-volume\") pod \"b69ab527-c37e-4383-bad0-a9616447278d\" (UID: \"b69ab527-c37e-4383-bad0-a9616447278d\") " Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.961640 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b69ab527-c37e-4383-bad0-a9616447278d-config-volume" (OuterVolumeSpecName: "config-volume") pod "b69ab527-c37e-4383-bad0-a9616447278d" (UID: "b69ab527-c37e-4383-bad0-a9616447278d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.967047 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b69ab527-c37e-4383-bad0-a9616447278d-kube-api-access-w7k5k" (OuterVolumeSpecName: "kube-api-access-w7k5k") pod "b69ab527-c37e-4383-bad0-a9616447278d" (UID: "b69ab527-c37e-4383-bad0-a9616447278d"). InnerVolumeSpecName "kube-api-access-w7k5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:15:02 crc kubenswrapper[4884]: I1128 18:15:02.981608 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b69ab527-c37e-4383-bad0-a9616447278d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b69ab527-c37e-4383-bad0-a9616447278d" (UID: "b69ab527-c37e-4383-bad0-a9616447278d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.063662 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b69ab527-c37e-4383-bad0-a9616447278d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.063693 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b69ab527-c37e-4383-bad0-a9616447278d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.063704 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7k5k\" (UniqueName: \"kubernetes.io/projected/b69ab527-c37e-4383-bad0-a9616447278d-kube-api-access-w7k5k\") on node \"crc\" DevicePath \"\"" Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.483517 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" event={"ID":"b69ab527-c37e-4383-bad0-a9616447278d","Type":"ContainerDied","Data":"b2bf8a0e2b53202bfc9768c8904baf3c31200bed2401821f8ed8caf57d0d42aa"} Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.483561 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405895-9zrhf" Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.483574 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2bf8a0e2b53202bfc9768c8904baf3c31200bed2401821f8ed8caf57d0d42aa" Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.963905 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v"] Nov 28 18:15:03 crc kubenswrapper[4884]: I1128 18:15:03.978197 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405850-pvf9v"] Nov 28 18:15:04 crc kubenswrapper[4884]: I1128 18:15:04.701973 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48016531-4bcd-4141-b403-f8c563192ce7" path="/var/lib/kubelet/pods/48016531-4bcd-4141-b403-f8c563192ce7/volumes" Nov 28 18:15:39 crc kubenswrapper[4884]: I1128 18:15:39.414594 4884 scope.go:117] "RemoveContainer" containerID="2e571cccf3b0f63ed720de15839bcb9522e76141f7cfbf56caffba22d4178449" Nov 28 18:16:21 crc kubenswrapper[4884]: I1128 18:16:21.242986 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:16:21 crc kubenswrapper[4884]: I1128 18:16:21.243567 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:16:51 crc kubenswrapper[4884]: I1128 18:16:51.243316 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 28 18:16:51 crc kubenswrapper[4884]: I1128 18:16:51.243971 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:17:21 crc kubenswrapper[4884]: I1128 18:17:21.243767 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:17:21 crc kubenswrapper[4884]: I1128 18:17:21.244604 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:17:21 crc kubenswrapper[4884]: I1128 18:17:21.244762 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" Nov 28 18:17:21 crc kubenswrapper[4884]: I1128 18:17:21.246782 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3"} pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 18:17:21 crc kubenswrapper[4884]: I1128 18:17:21.246960 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" containerID="cri-o://2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" gracePeriod=600 Nov 28 18:17:21 crc kubenswrapper[4884]: E1128 18:17:21.389157 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:17:22 crc kubenswrapper[4884]: I1128 18:17:22.192809 4884 generic.go:334] "Generic (PLEG): container finished" podID="120c26c6-4231-418f-a5af-738dc44915f8" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" exitCode=0 Nov 28 18:17:22 crc kubenswrapper[4884]: I1128 18:17:22.192879 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerDied","Data":"2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3"} Nov 28 18:17:22 crc kubenswrapper[4884]: I1128 18:17:22.192973 4884 scope.go:117] "RemoveContainer" containerID="9c7f9c5b7102416ef4bc39b5e89536f417a9cacb447866f7fcbf32207fa17c78" Nov 28 18:17:22 crc kubenswrapper[4884]: I1128 18:17:22.194202 4884 scope.go:117] "RemoveContainer" 
containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:17:22 crc kubenswrapper[4884]: E1128 18:17:22.194949 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.261874 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7f935e19-a4e0-4ac8-8706-341c2a6495a0/init-config-reloader/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.513679 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7f935e19-a4e0-4ac8-8706-341c2a6495a0/alertmanager/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.539755 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7f935e19-a4e0-4ac8-8706-341c2a6495a0/init-config-reloader/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.542336 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_7f935e19-a4e0-4ac8-8706-341c2a6495a0/config-reloader/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.766481 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a27f8ed5-7890-49f1-9330-9b8c61114002/aodh-api/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.771438 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a27f8ed5-7890-49f1-9330-9b8c61114002/aodh-evaluator/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.827002 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a27f8ed5-7890-49f1-9330-9b8c61114002/aodh-listener/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.959544 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_a27f8ed5-7890-49f1-9330-9b8c61114002/aodh-notifier/0.log" Nov 28 18:17:34 crc kubenswrapper[4884]: I1128 18:17:34.975836 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-77fc564658-4lcch_68374775-daef-4964-ad6c-7c9411287fdd/barbican-api/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.044825 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-77fc564658-4lcch_68374775-daef-4964-ad6c-7c9411287fdd/barbican-api-log/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.200996 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7bbd9868f8-pwsck_6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1/barbican-keystone-listener/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.228667 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7bbd9868f8-pwsck_6ffb554a-f0b6-4723-aa14-ce6b4f46bbf1/barbican-keystone-listener-log/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.395505 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-59676fff-jblk7_d7e17112-8210-4348-a0c8-16845aaf9633/barbican-worker-log/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.413006 4884 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-59676fff-jblk7_d7e17112-8210-4348-a0c8-16845aaf9633/barbican-worker/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.534767 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-t5cc9_9300d8e8-5928-41e7-b7d8-a073b49ce0af/bootstrap-openstack-openstack-cell1/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.623305 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bc953678-c24b-48f0-b9b7-606fd6418e97/ceilometer-central-agent/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.753639 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bc953678-c24b-48f0-b9b7-606fd6418e97/ceilometer-notification-agent/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.766117 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bc953678-c24b-48f0-b9b7-606fd6418e97/proxy-httpd/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.825466 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_bc953678-c24b-48f0-b9b7-606fd6418e97/sg-core/0.log" Nov 28 18:17:35 crc kubenswrapper[4884]: I1128 18:17:35.957964 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-openstack-openstack-cell1-mntbr_90ffd366-f202-4fd6-a806-6a7ee4d2a22a/ceph-client-openstack-openstack-cell1/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.105080 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_d1ef97aa-68ff-487f-8f54-acb5071d9f03/cinder-api-log/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.166406 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_d1ef97aa-68ff-487f-8f54-acb5071d9f03/cinder-api/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.373146 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9229318b-2501-439a-8422-4a2e8c837748/probe/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.390650 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_9229318b-2501-439a-8422-4a2e8c837748/cinder-backup/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.456026 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_76a4dca2-e8f2-40eb-8918-61cb5e11db55/cinder-scheduler/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.688281 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_76a4dca2-e8f2-40eb-8918-61cb5e11db55/probe/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.688485 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:17:36 crc kubenswrapper[4884]: E1128 18:17:36.688735 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.726281 4884 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_8602ccbe-13e0-4f57-8794-1e2f86802ae1/probe/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.728952 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_8602ccbe-13e0-4f57-8794-1e2f86802ae1/cinder-volume/0.log" Nov 28 18:17:36 crc kubenswrapper[4884]: I1128 18:17:36.917912 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-b67pg_a4bfa85c-2676-434e-bd4e-bf610fe32231/configure-network-openstack-openstack-cell1/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.001531 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-z74tb_9671879e-8915-469b-882c-b25003ce0d21/configure-os-openstack-openstack-cell1/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.201871 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-85cfb46855-c2bnw_86f307a6-ec5a-4857-8c6c-954119d2ef82/init/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.347526 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-85cfb46855-c2bnw_86f307a6-ec5a-4857-8c6c-954119d2ef82/init/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.401986 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-85cfb46855-c2bnw_86f307a6-ec5a-4857-8c6c-954119d2ef82/dnsmasq-dns/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.473171 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-8wpwr_5bc9bcb4-cbd4-4422-9722-16084405cf50/download-cache-openstack-openstack-cell1/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.611063 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_df5003b3-9cce-4245-9b5a-1d6fb634d2e1/glance-httpd/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.651008 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_df5003b3-9cce-4245-9b5a-1d6fb634d2e1/glance-log/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.798854 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_55cff4b4-3b59-43d0-8932-2407e4417f81/glance-httpd/0.log" Nov 28 18:17:37 crc kubenswrapper[4884]: I1128 18:17:37.827815 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_55cff4b4-3b59-43d0-8932-2407e4417f81/glance-log/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.007928 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-698bf6cff5-9rpkn_cff6e56f-02dd-4083-9493-0fca54d1ca6b/heat-api/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.148188 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5989c7c6f8-rhlzw_3b136c41-55db-4e2f-ba80-1e3e80ff5d64/heat-cfnapi/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.170365 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-c8f756f68-c4d7x_e275aeed-0618-42c8-8be3-61142cc18046/heat-engine/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.378658 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-6dc467574f-qbf7p_26e3dc83-2fac-49a2-84c9-cd2b20139b3d/horizon/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.405768 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-z44rr_fd73d060-ec7b-4d2e-9811-3a6495f53a42/install-certs-openstack-openstack-cell1/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.495196 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6dc467574f-qbf7p_26e3dc83-2fac-49a2-84c9-cd2b20139b3d/horizon-log/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.628668 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-fcw4g_51790cc2-00ff-4f07-9231-515e37777c81/install-os-openstack-openstack-cell1/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.798859 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-8588877946-hnncc_bfccd8a8-1ad7-4c08-85ae-b826a0309318/keystone-api/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.917017 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405881-g8k7b_6760a8ef-6d8d-4afa-b76f-64b152fec02f/keystone-cron/0.log" Nov 28 18:17:38 crc kubenswrapper[4884]: I1128 18:17:38.943909 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405821-56wvr_ad34ca9a-7c6d-45d4-a4f6-3f92d44d247d/keystone-cron/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.085628 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_0525b455-8b24-46be-af20-2a91f79b2eae/kube-state-metrics/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.197883 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-l46sp_07c45a00-0b6d-44ae-bf00-566fa5d81f4e/libvirt-openstack-openstack-cell1/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.517414 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_086b0687-4bf9-4a0c-abad-d7bff8dbedc3/manila-api-log/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.552535 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_086b0687-4bf9-4a0c-abad-d7bff8dbedc3/manila-api/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.660940 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_0e04e709-fc63-46e0-9659-7907d2af5dc6/manila-scheduler/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.707178 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_0e04e709-fc63-46e0-9659-7907d2af5dc6/probe/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.818877 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_02299b28-a3d4-47f3-8b14-7bc4bd0ebc53/manila-share/0.log" Nov 28 18:17:39 crc kubenswrapper[4884]: I1128 18:17:39.895968 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_02299b28-a3d4-47f3-8b14-7bc4bd0ebc53/probe/0.log" Nov 28 18:17:40 crc kubenswrapper[4884]: I1128 18:17:40.196031 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-79566747c-k6tf8_3aa02ca0-aa88-45f0-81cc-38c0b6204d1b/neutron-httpd/0.log" Nov 28 18:17:40 crc kubenswrapper[4884]: I1128 18:17:40.200876 4884 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-79566747c-k6tf8_3aa02ca0-aa88-45f0-81cc-38c0b6204d1b/neutron-api/0.log" Nov 28 18:17:40 crc kubenswrapper[4884]: I1128 18:17:40.459143 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-42kgt_436513f6-b254-482c-96a2-12faf0ab7f10/neutron-dhcp-openstack-openstack-cell1/0.log" Nov 28 18:17:40 crc kubenswrapper[4884]: I1128 18:17:40.502433 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-mmfl4_0a900132-e2fc-4366-97ca-67ca52ce4ee6/neutron-metadata-openstack-openstack-cell1/0.log" Nov 28 18:17:40 crc kubenswrapper[4884]: I1128 18:17:40.686725 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-xgw8z_c214f7b4-074f-4801-b6b6-375669694260/neutron-sriov-openstack-openstack-cell1/0.log" Nov 28 18:17:40 crc kubenswrapper[4884]: I1128 18:17:40.871127 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_1b761da9-cfa3-47d8-9738-c2dcb53921d1/nova-api-api/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.016041 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_1b761da9-cfa3-47d8-9738-c2dcb53921d1/nova-api-log/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.194129 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_bf2cb4fa-52dc-4714-8cf3-35be415c6f9f/nova-cell0-conductor-conductor/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.360291 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_88c9b274-5227-489e-bb7b-9b9469cf35c8/nova-cell1-conductor-conductor/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.535736 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_f3fabc95-602e-4977-b1e7-2a7eebb084c9/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.676697 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cellmjrlj_09df7760-4d98-47f8-b867-6b19765ca19f/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.806313 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-kgz9v_9b18111a-9199-4f55-8a8b-a740c1fec6dd/nova-cell1-openstack-openstack-cell1/0.log" Nov 28 18:17:41 crc kubenswrapper[4884]: I1128 18:17:41.955104 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_7b94860e-4dad-4539-9261-843db90fa876/nova-metadata-metadata/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.007498 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_7b94860e-4dad-4539-9261-843db90fa876/nova-metadata-log/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.148646 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_5b6665fd-4507-4d5c-88ec-678f989dd692/nova-scheduler-scheduler/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.247133 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-777656c4c8-725l2_232825e9-21d7-4a6b-86ac-b9f32f33d783/init/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: 
I1128 18:17:42.440046 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-777656c4c8-725l2_232825e9-21d7-4a6b-86ac-b9f32f33d783/init/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.533328 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-777656c4c8-725l2_232825e9-21d7-4a6b-86ac-b9f32f33d783/octavia-api-provider-agent/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.673914 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-qs24q_6fc7cf80-86a9-4afa-9070-9c04e72bd38a/init/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.780330 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-777656c4c8-725l2_232825e9-21d7-4a6b-86ac-b9f32f33d783/octavia-api/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.885671 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-qs24q_6fc7cf80-86a9-4afa-9070-9c04e72bd38a/init/0.log" Nov 28 18:17:42 crc kubenswrapper[4884]: I1128 18:17:42.985454 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-qs24q_6fc7cf80-86a9-4afa-9070-9c04e72bd38a/octavia-healthmanager/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.070338 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-z2w2b_61a616f8-3c9b-4e6e-86dc-1009cfe68cc1/init/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.243379 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-z2w2b_61a616f8-3c9b-4e6e-86dc-1009cfe68cc1/init/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.293032 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-7ltcn_1b820105-7d30-4fde-b776-ed3e9aaad018/init/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.331748 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-z2w2b_61a616f8-3c9b-4e6e-86dc-1009cfe68cc1/octavia-housekeeping/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.566063 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-7ltcn_1b820105-7d30-4fde-b776-ed3e9aaad018/init/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.605422 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-7ltcn_1b820105-7d30-4fde-b776-ed3e9aaad018/octavia-amphora-httpd/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.653892 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-4r269_58718a63-4d7e-4e0b-bd7f-140dfbdf18a5/init/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.810341 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-4r269_58718a63-4d7e-4e0b-bd7f-140dfbdf18a5/init/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.854962 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-4r269_58718a63-4d7e-4e0b-bd7f-140dfbdf18a5/octavia-rsyslog/0.log" Nov 28 18:17:43 crc kubenswrapper[4884]: I1128 18:17:43.900566 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-52gdk_ab7773db-d172-488a-9836-537d682406c3/init/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.135144 4884 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-52gdk_ab7773db-d172-488a-9836-537d682406c3/init/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.166406 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c3fd4b88-fe6f-4f27-8e1c-26360e576cf6/mysql-bootstrap/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.344350 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-52gdk_ab7773db-d172-488a-9836-537d682406c3/octavia-worker/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.510152 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c3fd4b88-fe6f-4f27-8e1c-26360e576cf6/galera/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.515167 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_c3fd4b88-fe6f-4f27-8e1c-26360e576cf6/mysql-bootstrap/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.643443 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e64ccfeb-af75-4fa7-adb5-919c41a2f261/mysql-bootstrap/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.838862 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_d87676c1-cb88-4f8a-8151-421c0ef330fe/openstackclient/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.885789 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e64ccfeb-af75-4fa7-adb5-919c41a2f261/mysql-bootstrap/0.log" Nov 28 18:17:44 crc kubenswrapper[4884]: I1128 18:17:44.899191 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e64ccfeb-af75-4fa7-adb5-919c41a2f261/galera/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.137769 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-lcdfn_7835ecf0-f178-4a1c-b6d2-50c1ce35e0aa/ovn-controller/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.383014 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-2wx5g_29a42ba8-dba1-4527-86bb-3d6b53a008c8/openstack-network-exporter/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.479662 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xkdfq_a5d86993-2f86-4069-aff5-123cfba6f2a6/ovsdb-server-init/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.606933 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xkdfq_a5d86993-2f86-4069-aff5-123cfba6f2a6/ovs-vswitchd/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.628322 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xkdfq_a5d86993-2f86-4069-aff5-123cfba6f2a6/ovsdb-server-init/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.709929 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xkdfq_a5d86993-2f86-4069-aff5-123cfba6f2a6/ovsdb-server/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.871879 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_adc27ec0-61ce-4045-98ff-894f7bf14067/openstack-network-exporter/0.log" Nov 28 18:17:45 crc kubenswrapper[4884]: I1128 18:17:45.915545 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-northd-0_adc27ec0-61ce-4045-98ff-894f7bf14067/ovn-northd/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.209402 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a5817509-87ff-4139-a4af-c95881825b8a/openstack-network-exporter/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.212283 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-5gr76_b3a9fdf6-1044-49c8-ac27-e5d424219c3f/ovn-openstack-openstack-cell1/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.316046 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_a5817509-87ff-4139-a4af-c95881825b8a/ovsdbserver-nb/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.490432 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_af2877a4-7652-44cc-a491-21592c862759/openstack-network-exporter/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.507914 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_af2877a4-7652-44cc-a491-21592c862759/ovsdbserver-nb/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.628920 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_93b03444-4c92-4783-90ba-91fd986a3c55/openstack-network-exporter/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.725621 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_93b03444-4c92-4783-90ba-91fd986a3c55/ovsdbserver-nb/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.881963 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ab5a5a79-99a6-488e-a1d0-68b07c36b62e/openstack-network-exporter/0.log" Nov 28 18:17:46 crc kubenswrapper[4884]: I1128 18:17:46.965144 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_ab5a5a79-99a6-488e-a1d0-68b07c36b62e/ovsdbserver-sb/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.030440 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_13d20f27-740b-463c-ae05-cae54c02c404/openstack-network-exporter/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.102379 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_13d20f27-740b-463c-ae05-cae54c02c404/ovsdbserver-sb/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.298906 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_c638bade-936d-4342-8146-527c2cb80373/openstack-network-exporter/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.350533 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_c638bade-936d-4342-8146-527c2cb80373/ovsdbserver-sb/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.740213 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-54bd44d4c6-xgndh_a7acff93-9169-40ef-9c9b-85d14789248a/placement-api/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.936934 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-54bd44d4c6-xgndh_a7acff93-9169-40ef-9c9b-85d14789248a/placement-log/0.log" Nov 28 18:17:47 crc kubenswrapper[4884]: I1128 18:17:47.968680 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-cx72gd_69a82a4c-156c-4447-8610-2d7f9572a42d/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.380999 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4c97248e-a5bc-4d82-a535-f4701b40dbf0/init-config-reloader/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.462492 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4c97248e-a5bc-4d82-a535-f4701b40dbf0/config-reloader/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.476891 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4c97248e-a5bc-4d82-a535-f4701b40dbf0/prometheus/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.527221 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4c97248e-a5bc-4d82-a535-f4701b40dbf0/init-config-reloader/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.620472 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_4c97248e-a5bc-4d82-a535-f4701b40dbf0/thanos-sidecar/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.683071 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_00782f1b-8777-480e-bb85-1a4cafb77cee/setup-container/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.941355 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_00782f1b-8777-480e-bb85-1a4cafb77cee/rabbitmq/0.log" Nov 28 18:17:48 crc kubenswrapper[4884]: I1128 18:17:48.967583 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_00782f1b-8777-480e-bb85-1a4cafb77cee/setup-container/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.074685 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b9c35913-c5bf-4459-8252-a5ba99fb302b/setup-container/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.200946 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b9c35913-c5bf-4459-8252-a5ba99fb302b/rabbitmq/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.208501 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_b9c35913-c5bf-4459-8252-a5ba99fb302b/setup-container/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.303145 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-m9qqn_d902a353-422e-45fc-90ca-6f46279c8954/reboot-os-openstack-openstack-cell1/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.465940 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-pxx8h_d303124f-49ec-447f-b8df-ff946aa08d58/run-os-openstack-openstack-cell1/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.637355 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-hstkk_18dd9864-4a06-4605-812e-0e70eb1ede62/ssh-known-hosts-openstack/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.808181 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-4tvmx_1c9d690e-b787-4b1a-a760-83f68d6a69a0/telemetry-openstack-openstack-cell1/0.log" Nov 28 18:17:49 crc kubenswrapper[4884]: I1128 18:17:49.974925 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-llv6t_4b7c2ba0-5ceb-4b30-ae1b-eea35beedb68/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Nov 28 18:17:50 crc kubenswrapper[4884]: I1128 18:17:50.024516 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-sb4x9_9e362df8-70ef-4d2a-b224-fb3dd8c05732/validate-network-openstack-openstack-cell1/0.log" Nov 28 18:17:50 crc kubenswrapper[4884]: I1128 18:17:50.509650 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_b6285863-9c40-4877-891e-acc716bb80d4/memcached/0.log" Nov 28 18:17:51 crc kubenswrapper[4884]: I1128 18:17:51.688957 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:17:51 crc kubenswrapper[4884]: E1128 18:17:51.689641 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:18:03 crc kubenswrapper[4884]: I1128 18:18:03.688448 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:18:03 crc kubenswrapper[4884]: E1128 18:18:03.689213 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.059225 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/util/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.237048 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/util/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.272952 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/pull/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.278915 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/pull/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.438920 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/pull/0.log" 
Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.474315 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/extract/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.493226 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_291e3763ba140037bb2485ba998b4d30af8bcf711c9c09c51f6cccd2f4gz8m2_581c69a1-7109-4834-be13-a37c3343212b/util/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.630674 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-xplsn_c54a7e19-62d7-474b-90f2-0411cc2a1942/kube-rbac-proxy/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.686679 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-j56b4_ef79df92-f12e-4606-8b3d-27b23a1bc3c7/kube-rbac-proxy/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.743009 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-5bfbbb859d-xplsn_c54a7e19-62d7-474b-90f2-0411cc2a1942/manager/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.875419 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-748967c98-j56b4_ef79df92-f12e-4606-8b3d-27b23a1bc3c7/manager/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.926986 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-5z54f_262d6bdb-061f-406a-bd20-c0e17112188a/kube-rbac-proxy/0.log" Nov 28 18:18:12 crc kubenswrapper[4884]: I1128 18:18:12.929330 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6788cc6d75-5z54f_262d6bdb-061f-406a-bd20-c0e17112188a/manager/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.116652 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-ddjbx_9831a921-a48d-4446-a5da-648cebb21936/kube-rbac-proxy/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.275242 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-85fbd69fcd-ddjbx_9831a921-a48d-4446-a5da-648cebb21936/manager/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.294561 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-4k5mj_fca659fc-dce8-4284-a2a4-bbeb59993bcf/kube-rbac-proxy/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.344349 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-698d6fd7d6-4k5mj_fca659fc-dce8-4284-a2a4-bbeb59993bcf/manager/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.471926 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-67f2r_6c2798c4-5cb6-4d34-9178-1d422470bbb1/kube-rbac-proxy/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.555138 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-7d5d9fd47f-67f2r_6c2798c4-5cb6-4d34-9178-1d422470bbb1/manager/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.607163 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-5qkwv_19969fcc-a2cf-4ed4-afd5-4d585f5dcb70/kube-rbac-proxy/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.828350 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-5xhvn_c4d80b0a-9e14-4a7a-8461-129c8cb07e9d/kube-rbac-proxy/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.874161 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-54485f899-5xhvn_c4d80b0a-9e14-4a7a-8461-129c8cb07e9d/manager/0.log" Nov 28 18:18:13 crc kubenswrapper[4884]: I1128 18:18:13.998469 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6c55d8d69b-5qkwv_19969fcc-a2cf-4ed4-afd5-4d585f5dcb70/manager/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.029643 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-zd6gg_36d673e0-6b01-4296-829d-ce3c935876ad/kube-rbac-proxy/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.145865 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-79cc9d59f5-zd6gg_36d673e0-6b01-4296-829d-ce3c935876ad/manager/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.269582 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-qbh5l_8bcfffae-7352-42ae-9fe3-c0a6e85a9301/kube-rbac-proxy/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.274618 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5cbc8c7f96-qbh5l_8bcfffae-7352-42ae-9fe3-c0a6e85a9301/manager/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.502648 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-lkq65_20081e0d-0be1-46c4-a60a-49034215b26d/kube-rbac-proxy/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.505658 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-hrrh7_4e61efa8-cdc4-4974-ab4b-5f81e26cd439/manager/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.512413 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-64d7c556cd-hrrh7_4e61efa8-cdc4-4974-ab4b-5f81e26cd439/kube-rbac-proxy/0.log" Nov 28 18:18:14 crc kubenswrapper[4884]: I1128 18:18:14.945609 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-422zp_26e94d5b-3a3c-49d9-910f-4f339780ec2f/kube-rbac-proxy/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.008545 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-58879495c-lkq65_20081e0d-0be1-46c4-a60a-49034215b26d/manager/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.131882 4884 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79d658b66d-422zp_26e94d5b-3a3c-49d9-910f-4f339780ec2f/manager/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.189850 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-srsqz_eb06d6ab-34b7-4511-9680-6903fe6d50b7/kube-rbac-proxy/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.289885 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-d5fb87cb8-srsqz_eb06d6ab-34b7-4511-9680-6903fe6d50b7/manager/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.343304 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-gt8db_a96a807f-9e3f-426a-bd7c-b1c14db24baa/kube-rbac-proxy/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.421240 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-77868f484-gt8db_a96a807f-9e3f-426a-bd7c-b1c14db24baa/manager/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.559822 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-69699fdd55-xr4cn_fe380e38-07d1-423d-8a43-51f2a0dc154f/kube-rbac-proxy/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.793848 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-857c5c6d5d-2dlgq_62a970c8-14b9-462a-8e0f-773b9b2847c5/kube-rbac-proxy/0.log" Nov 28 18:18:15 crc kubenswrapper[4884]: I1128 18:18:15.974657 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-857c5c6d5d-2dlgq_62a970c8-14b9-462a-8e0f-773b9b2847c5/operator/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.037223 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-d8z4x_b487198c-9fa4-49b0-9adc-147b3e38dd94/registry-server/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.261530 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-89jvx_a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9/kube-rbac-proxy/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.286678 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5b67cfc8fb-89jvx_a92d70cf-b0eb-4cd9-8b45-4ffcca9c5ee9/manager/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.517761 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-zcwcr_23a976c8-631a-4d07-a1a1-d44fdbac2e06/kube-rbac-proxy/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.518283 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-867d87977b-zcwcr_23a976c8-631a-4d07-a1a1-d44fdbac2e06/manager/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.610459 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-xlwjj_ef2bba04-9b56-460b-8ad2-c5be6d08f79d/operator/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 
18:18:16.740317 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-5wqmv_6fb9fd98-178d-4461-a090-ae0a6b35e258/kube-rbac-proxy/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.850760 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-8f6687c44-5wqmv_6fb9fd98-178d-4461-a090-ae0a6b35e258/manager/0.log" Nov 28 18:18:16 crc kubenswrapper[4884]: I1128 18:18:16.969587 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-zfmh2_53444326-ebe6-45f5-a086-63ef03d1533a/kube-rbac-proxy/0.log" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.070709 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-5g9m9_7a96a060-30e3-4405-83cb-7a9b85d24c84/kube-rbac-proxy/0.log" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.203275 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-bb86466d8-5g9m9_7a96a060-30e3-4405-83cb-7a9b85d24c84/manager/0.log" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.296284 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-695797c565-zfmh2_53444326-ebe6-45f5-a086-63ef03d1533a/manager/0.log" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.361982 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-lsxjw_ac6ec22d-b6e9-49c7-9c84-222e82ba75d2/kube-rbac-proxy/0.log" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.443882 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b56b8849f-lsxjw_ac6ec22d-b6e9-49c7-9c84-222e82ba75d2/manager/0.log" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.687970 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:18:17 crc kubenswrapper[4884]: E1128 18:18:17.688319 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:18:17 crc kubenswrapper[4884]: I1128 18:18:17.845680 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-69699fdd55-xr4cn_fe380e38-07d1-423d-8a43-51f2a0dc154f/manager/0.log" Nov 28 18:18:30 crc kubenswrapper[4884]: I1128 18:18:30.699777 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:18:30 crc kubenswrapper[4884]: E1128 18:18:30.700891 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" 
podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:18:33 crc kubenswrapper[4884]: I1128 18:18:33.997025 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-45lsh_74ebbc5f-8432-43be-afdc-5aebcfd1dbf1/control-plane-machine-set-operator/0.log" Nov 28 18:18:34 crc kubenswrapper[4884]: I1128 18:18:34.190691 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6tbrh_b074b05e-c5dd-4818-8808-b6207aff3514/machine-api-operator/0.log" Nov 28 18:18:34 crc kubenswrapper[4884]: I1128 18:18:34.210534 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-6tbrh_b074b05e-c5dd-4818-8808-b6207aff3514/kube-rbac-proxy/0.log" Nov 28 18:18:42 crc kubenswrapper[4884]: I1128 18:18:42.690535 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:18:42 crc kubenswrapper[4884]: E1128 18:18:42.691541 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.482945 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7hzz8"] Nov 28 18:18:45 crc kubenswrapper[4884]: E1128 18:18:45.483913 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b69ab527-c37e-4383-bad0-a9616447278d" containerName="collect-profiles" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.483926 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b69ab527-c37e-4383-bad0-a9616447278d" containerName="collect-profiles" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.484157 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b69ab527-c37e-4383-bad0-a9616447278d" containerName="collect-profiles" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.486059 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.514311 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7hzz8"] Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.540912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-utilities\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.540970 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftdhs\" (UniqueName: \"kubernetes.io/projected/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-kube-api-access-ftdhs\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.541154 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-catalog-content\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.642945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-utilities\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.642992 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftdhs\" (UniqueName: \"kubernetes.io/projected/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-kube-api-access-ftdhs\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.643161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-catalog-content\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.643620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-utilities\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.643640 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-catalog-content\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.666124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ftdhs\" (UniqueName: \"kubernetes.io/projected/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-kube-api-access-ftdhs\") pod \"redhat-operators-7hzz8\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:45 crc kubenswrapper[4884]: I1128 18:18:45.804330 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:46 crc kubenswrapper[4884]: I1128 18:18:46.344602 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7hzz8"] Nov 28 18:18:47 crc kubenswrapper[4884]: I1128 18:18:47.050692 4884 generic.go:334] "Generic (PLEG): container finished" podID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerID="0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76" exitCode=0 Nov 28 18:18:47 crc kubenswrapper[4884]: I1128 18:18:47.050734 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerDied","Data":"0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76"} Nov 28 18:18:47 crc kubenswrapper[4884]: I1128 18:18:47.050760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerStarted","Data":"ee767957d4e3bb0fd37f301e34b466923add53c27d8436ff95abb817a9b605ac"} Nov 28 18:18:47 crc kubenswrapper[4884]: I1128 18:18:47.053157 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:18:47 crc kubenswrapper[4884]: I1128 18:18:47.916965 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-h88rg_58d06855-5e34-4170-afba-4c4266ff7e35/cert-manager-controller/0.log" Nov 28 18:18:48 crc kubenswrapper[4884]: I1128 18:18:48.051934 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-r6cqc_5d300312-c908-4667-a71f-f1d6e3279dbd/cert-manager-cainjector/0.log" Nov 28 18:18:48 crc kubenswrapper[4884]: I1128 18:18:48.076752 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerStarted","Data":"717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27"} Nov 28 18:18:48 crc kubenswrapper[4884]: I1128 18:18:48.175428 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-7jj8r_e6267b76-4b35-49cd-a61e-51906e210463/cert-manager-webhook/0.log" Nov 28 18:18:51 crc kubenswrapper[4884]: I1128 18:18:51.107867 4884 generic.go:334] "Generic (PLEG): container finished" podID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerID="717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27" exitCode=0 Nov 28 18:18:51 crc kubenswrapper[4884]: I1128 18:18:51.107939 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerDied","Data":"717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27"} Nov 28 18:18:52 crc kubenswrapper[4884]: I1128 18:18:52.127013 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" 
event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerStarted","Data":"c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad"} Nov 28 18:18:52 crc kubenswrapper[4884]: I1128 18:18:52.154863 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7hzz8" podStartSLOduration=2.407154072 podStartE2EDuration="7.154845292s" podCreationTimestamp="2025-11-28 18:18:45 +0000 UTC" firstStartedPulling="2025-11-28 18:18:47.052897827 +0000 UTC m=+10766.615681628" lastFinishedPulling="2025-11-28 18:18:51.800589017 +0000 UTC m=+10771.363372848" observedRunningTime="2025-11-28 18:18:52.147151273 +0000 UTC m=+10771.709935084" watchObservedRunningTime="2025-11-28 18:18:52.154845292 +0000 UTC m=+10771.717629093" Nov 28 18:18:54 crc kubenswrapper[4884]: I1128 18:18:54.689126 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:18:54 crc kubenswrapper[4884]: E1128 18:18:54.690068 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:18:55 crc kubenswrapper[4884]: I1128 18:18:55.805253 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:55 crc kubenswrapper[4884]: I1128 18:18:55.805540 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:18:56 crc kubenswrapper[4884]: I1128 18:18:56.861655 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7hzz8" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="registry-server" probeResult="failure" output=< Nov 28 18:18:56 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Nov 28 18:18:56 crc kubenswrapper[4884]: > Nov 28 18:19:01 crc kubenswrapper[4884]: I1128 18:19:01.946726 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-4nnzl_069d1c60-3d48-4f4b-848d-3dd850f26e3a/nmstate-console-plugin/0.log" Nov 28 18:19:02 crc kubenswrapper[4884]: I1128 18:19:02.086883 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-qpkc4_85a9a43f-4884-4277-af0b-2e7c0d88de32/nmstate-handler/0.log" Nov 28 18:19:02 crc kubenswrapper[4884]: I1128 18:19:02.169935 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-hpbq5_4deccde2-5e37-4721-83a8-aaeacb1ccbe6/kube-rbac-proxy/0.log" Nov 28 18:19:02 crc kubenswrapper[4884]: I1128 18:19:02.174938 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-hpbq5_4deccde2-5e37-4721-83a8-aaeacb1ccbe6/nmstate-metrics/0.log" Nov 28 18:19:02 crc kubenswrapper[4884]: I1128 18:19:02.389634 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-k96zr_da26dcb7-e333-4e51-9b40-8ea6c744048e/nmstate-operator/0.log" Nov 28 18:19:02 crc kubenswrapper[4884]: I1128 18:19:02.414879 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-45zjs_b46aa294-0c5d-47f8-89a5-08b55bc74c95/nmstate-webhook/0.log" Nov 28 18:19:05 crc kubenswrapper[4884]: I1128 18:19:05.702934 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:19:05 crc kubenswrapper[4884]: E1128 18:19:05.704327 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:19:05 crc kubenswrapper[4884]: I1128 18:19:05.951956 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:19:06 crc kubenswrapper[4884]: I1128 18:19:06.007844 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:19:06 crc kubenswrapper[4884]: I1128 18:19:06.195426 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7hzz8"] Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.286475 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7hzz8" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="registry-server" containerID="cri-o://c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad" gracePeriod=2 Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.819704 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.945418 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-utilities\") pod \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.945809 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftdhs\" (UniqueName: \"kubernetes.io/projected/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-kube-api-access-ftdhs\") pod \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.945901 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-catalog-content\") pod \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\" (UID: \"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1\") " Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.949028 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-utilities" (OuterVolumeSpecName: "utilities") pod "1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" (UID: "1e12cace-404b-4bc8-9f0a-b7301f5d6ba1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:19:07 crc kubenswrapper[4884]: I1128 18:19:07.956512 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-kube-api-access-ftdhs" (OuterVolumeSpecName: "kube-api-access-ftdhs") pod "1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" (UID: "1e12cace-404b-4bc8-9f0a-b7301f5d6ba1"). InnerVolumeSpecName "kube-api-access-ftdhs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.048440 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.048468 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftdhs\" (UniqueName: \"kubernetes.io/projected/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-kube-api-access-ftdhs\") on node \"crc\" DevicePath \"\"" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.058536 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" (UID: "1e12cace-404b-4bc8-9f0a-b7301f5d6ba1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.150074 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.296886 4884 generic.go:334] "Generic (PLEG): container finished" podID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerID="c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad" exitCode=0 Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.296942 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7hzz8" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.296944 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerDied","Data":"c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad"} Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.297103 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7hzz8" event={"ID":"1e12cace-404b-4bc8-9f0a-b7301f5d6ba1","Type":"ContainerDied","Data":"ee767957d4e3bb0fd37f301e34b466923add53c27d8436ff95abb817a9b605ac"} Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.297139 4884 scope.go:117] "RemoveContainer" containerID="c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.321904 4884 scope.go:117] "RemoveContainer" containerID="717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.349339 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7hzz8"] Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.363876 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7hzz8"] Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.366342 4884 scope.go:117] "RemoveContainer" containerID="0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.404183 4884 scope.go:117] "RemoveContainer" containerID="c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad" Nov 28 18:19:08 crc kubenswrapper[4884]: E1128 18:19:08.404961 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad\": container with ID starting with c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad not found: ID does not exist" containerID="c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.405000 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad"} err="failed to get container status \"c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad\": rpc error: code = NotFound desc = could not find container \"c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad\": container with ID starting with c348257c609147260dac1fcf2ccea4dacf0693315ef7868eb84b431086b307ad not found: ID does not exist" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.405026 4884 scope.go:117] "RemoveContainer" containerID="717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27" Nov 28 18:19:08 crc kubenswrapper[4884]: E1128 18:19:08.405766 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27\": container with ID starting with 717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27 not found: ID does not exist" containerID="717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.405787 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27"} err="failed to get container status \"717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27\": rpc error: code = NotFound desc = could not find container \"717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27\": container with ID starting with 717ecf61d0e83e96551b617c7ea67b3c598b31323d4bf9a013bcbee7a96edb27 not found: ID does not exist" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.405800 4884 scope.go:117] "RemoveContainer" containerID="0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76" Nov 28 18:19:08 crc kubenswrapper[4884]: E1128 18:19:08.406617 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76\": container with ID starting with 0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76 not found: ID does not exist" containerID="0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.406638 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76"} err="failed to get container status \"0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76\": rpc error: code = NotFound desc = could not find container \"0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76\": container with ID starting with 0ce5a58cd546204f9c62f412edb91595cdb6cca566837155bad601459fa09e76 not found: ID does not exist" Nov 28 18:19:08 crc kubenswrapper[4884]: I1128 18:19:08.702497 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" path="/var/lib/kubelet/pods/1e12cace-404b-4bc8-9f0a-b7301f5d6ba1/volumes" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.265131 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4xh7j"] Nov 28 18:19:11 crc kubenswrapper[4884]: E1128 18:19:11.266081 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="extract-content" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.266111 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="extract-content" Nov 28 18:19:11 crc kubenswrapper[4884]: E1128 18:19:11.266132 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="extract-utilities" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.266139 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="extract-utilities" Nov 28 18:19:11 crc kubenswrapper[4884]: E1128 18:19:11.266166 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="registry-server" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.266172 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" containerName="registry-server" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.266378 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e12cace-404b-4bc8-9f0a-b7301f5d6ba1" 
containerName="registry-server" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.267898 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.282316 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4xh7j"] Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.440774 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-catalog-content\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.440871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-utilities\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.440951 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg6wf\" (UniqueName: \"kubernetes.io/projected/54ec5032-6598-4238-827c-3003eb104021-kube-api-access-sg6wf\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.542991 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-catalog-content\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.543071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-utilities\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.543160 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg6wf\" (UniqueName: \"kubernetes.io/projected/54ec5032-6598-4238-827c-3003eb104021-kube-api-access-sg6wf\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.543660 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-catalog-content\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.543676 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-utilities\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " 
pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.561776 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg6wf\" (UniqueName: \"kubernetes.io/projected/54ec5032-6598-4238-827c-3003eb104021-kube-api-access-sg6wf\") pod \"redhat-marketplace-4xh7j\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:11 crc kubenswrapper[4884]: I1128 18:19:11.594521 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:12 crc kubenswrapper[4884]: I1128 18:19:12.079401 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4xh7j"] Nov 28 18:19:12 crc kubenswrapper[4884]: I1128 18:19:12.346776 4884 generic.go:334] "Generic (PLEG): container finished" podID="54ec5032-6598-4238-827c-3003eb104021" containerID="6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec" exitCode=0 Nov 28 18:19:12 crc kubenswrapper[4884]: I1128 18:19:12.346844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerDied","Data":"6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec"} Nov 28 18:19:12 crc kubenswrapper[4884]: I1128 18:19:12.347120 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerStarted","Data":"b14a3bfec6be21c9fd3b9f1e2475486c73cb75d21fa5432dd97cb7857086bb4a"} Nov 28 18:19:13 crc kubenswrapper[4884]: I1128 18:19:13.365973 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerStarted","Data":"8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0"} Nov 28 18:19:14 crc kubenswrapper[4884]: I1128 18:19:14.383917 4884 generic.go:334] "Generic (PLEG): container finished" podID="54ec5032-6598-4238-827c-3003eb104021" containerID="8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0" exitCode=0 Nov 28 18:19:14 crc kubenswrapper[4884]: I1128 18:19:14.384000 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerDied","Data":"8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0"} Nov 28 18:19:15 crc kubenswrapper[4884]: I1128 18:19:15.401709 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerStarted","Data":"3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd"} Nov 28 18:19:15 crc kubenswrapper[4884]: I1128 18:19:15.431894 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4xh7j" podStartSLOduration=1.925101484 podStartE2EDuration="4.431870648s" podCreationTimestamp="2025-11-28 18:19:11 +0000 UTC" firstStartedPulling="2025-11-28 18:19:12.352284186 +0000 UTC m=+10791.915067987" lastFinishedPulling="2025-11-28 18:19:14.85905335 +0000 UTC m=+10794.421837151" observedRunningTime="2025-11-28 18:19:15.420252045 +0000 UTC m=+10794.983035846" watchObservedRunningTime="2025-11-28 18:19:15.431870648 +0000 UTC 
m=+10794.994654459" Nov 28 18:19:17 crc kubenswrapper[4884]: I1128 18:19:17.888848 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-qcdlm_723c2bb6-327d-4a9c-8590-03fad405f0e9/kube-rbac-proxy/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.178471 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-qcdlm_723c2bb6-327d-4a9c-8590-03fad405f0e9/controller/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.292674 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-frr-files/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.551044 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-reloader/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.559907 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-reloader/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.560639 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-frr-files/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.570516 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-metrics/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.783932 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-reloader/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.816192 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-frr-files/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.822522 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-metrics/0.log" Nov 28 18:19:18 crc kubenswrapper[4884]: I1128 18:19:18.839750 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-metrics/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.003742 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-frr-files/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.023354 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-reloader/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.029366 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/cp-metrics/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.146635 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/controller/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.211986 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/frr-metrics/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.222821 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/kube-rbac-proxy/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.380030 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/kube-rbac-proxy-frr/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.473953 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/reloader/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.645327 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-mmq74_7919a63a-0933-4ed1-a9e7-3235165603b9/frr-k8s-webhook-server/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.923770 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-74588d6766-r92s8_f1162fe6-2514-476c-b62c-ce0e88c06488/manager/0.log" Nov 28 18:19:19 crc kubenswrapper[4884]: I1128 18:19:19.984847 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-8c64fc959-7s75n_40f0f89b-3ee3-42ac-a5c3-3e3f5f042bdf/webhook-server/0.log" Nov 28 18:19:20 crc kubenswrapper[4884]: I1128 18:19:20.151871 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7vzh9_84cf5151-9588-4977-ac9b-c629f14f95c4/kube-rbac-proxy/0.log" Nov 28 18:19:20 crc kubenswrapper[4884]: I1128 18:19:20.704729 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:19:20 crc kubenswrapper[4884]: E1128 18:19:20.715859 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:19:21 crc kubenswrapper[4884]: I1128 18:19:21.047805 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-7vzh9_84cf5151-9588-4977-ac9b-c629f14f95c4/speaker/0.log" Nov 28 18:19:21 crc kubenswrapper[4884]: I1128 18:19:21.594669 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:21 crc kubenswrapper[4884]: I1128 18:19:21.594711 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:21 crc kubenswrapper[4884]: I1128 18:19:21.646904 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:22 crc kubenswrapper[4884]: I1128 18:19:22.546988 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:22 crc kubenswrapper[4884]: I1128 18:19:22.597829 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4xh7j"] Nov 28 18:19:22 crc kubenswrapper[4884]: I1128 18:19:22.670331 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-8ct25_80b94e5d-88d1-435d-8cb8-35c0e2fab7dd/frr/0.log" Nov 28 18:19:24 crc kubenswrapper[4884]: 
I1128 18:19:24.513787 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4xh7j" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="registry-server" containerID="cri-o://3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd" gracePeriod=2 Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.154156 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.232637 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-catalog-content\") pod \"54ec5032-6598-4238-827c-3003eb104021\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.232811 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-utilities\") pod \"54ec5032-6598-4238-827c-3003eb104021\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.232947 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg6wf\" (UniqueName: \"kubernetes.io/projected/54ec5032-6598-4238-827c-3003eb104021-kube-api-access-sg6wf\") pod \"54ec5032-6598-4238-827c-3003eb104021\" (UID: \"54ec5032-6598-4238-827c-3003eb104021\") " Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.233801 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-utilities" (OuterVolumeSpecName: "utilities") pod "54ec5032-6598-4238-827c-3003eb104021" (UID: "54ec5032-6598-4238-827c-3003eb104021"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.238948 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54ec5032-6598-4238-827c-3003eb104021-kube-api-access-sg6wf" (OuterVolumeSpecName: "kube-api-access-sg6wf") pod "54ec5032-6598-4238-827c-3003eb104021" (UID: "54ec5032-6598-4238-827c-3003eb104021"). InnerVolumeSpecName "kube-api-access-sg6wf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.257983 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "54ec5032-6598-4238-827c-3003eb104021" (UID: "54ec5032-6598-4238-827c-3003eb104021"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.335188 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg6wf\" (UniqueName: \"kubernetes.io/projected/54ec5032-6598-4238-827c-3003eb104021-kube-api-access-sg6wf\") on node \"crc\" DevicePath \"\"" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.335227 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.335240 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/54ec5032-6598-4238-827c-3003eb104021-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.526428 4884 generic.go:334] "Generic (PLEG): container finished" podID="54ec5032-6598-4238-827c-3003eb104021" containerID="3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd" exitCode=0 Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.526650 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerDied","Data":"3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd"} Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.527789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4xh7j" event={"ID":"54ec5032-6598-4238-827c-3003eb104021","Type":"ContainerDied","Data":"b14a3bfec6be21c9fd3b9f1e2475486c73cb75d21fa5432dd97cb7857086bb4a"} Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.527901 4884 scope.go:117] "RemoveContainer" containerID="3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.526737 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4xh7j" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.561000 4884 scope.go:117] "RemoveContainer" containerID="8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.579317 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4xh7j"] Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.587211 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4xh7j"] Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.612449 4884 scope.go:117] "RemoveContainer" containerID="6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.637434 4884 scope.go:117] "RemoveContainer" containerID="3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd" Nov 28 18:19:25 crc kubenswrapper[4884]: E1128 18:19:25.638726 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd\": container with ID starting with 3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd not found: ID does not exist" containerID="3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.638761 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd"} err="failed to get container status \"3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd\": rpc error: code = NotFound desc = could not find container \"3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd\": container with ID starting with 3d046248b6c6fc996e3f12f4d3a206ffa65d05f9df61836df95afcab319d62bd not found: ID does not exist" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.638781 4884 scope.go:117] "RemoveContainer" containerID="8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0" Nov 28 18:19:25 crc kubenswrapper[4884]: E1128 18:19:25.639336 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0\": container with ID starting with 8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0 not found: ID does not exist" containerID="8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.639360 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0"} err="failed to get container status \"8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0\": rpc error: code = NotFound desc = could not find container \"8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0\": container with ID starting with 8fd758425beb8f42c72093e7852703e48f29eb7864ad616bedf7684d293fc8a0 not found: ID does not exist" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.639402 4884 scope.go:117] "RemoveContainer" containerID="6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec" Nov 28 18:19:25 crc kubenswrapper[4884]: E1128 18:19:25.640788 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec\": container with ID starting with 6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec not found: ID does not exist" containerID="6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec" Nov 28 18:19:25 crc kubenswrapper[4884]: I1128 18:19:25.641510 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec"} err="failed to get container status \"6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec\": rpc error: code = NotFound desc = could not find container \"6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec\": container with ID starting with 6f1864d10d0fc721def44a795c831b623e51131e676f69cf9c331c49a28f48ec not found: ID does not exist" Nov 28 18:19:26 crc kubenswrapper[4884]: I1128 18:19:26.704152 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54ec5032-6598-4238-827c-3003eb104021" path="/var/lib/kubelet/pods/54ec5032-6598-4238-827c-3003eb104021/volumes" Nov 28 18:19:32 crc kubenswrapper[4884]: I1128 18:19:32.691761 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:19:32 crc kubenswrapper[4884]: E1128 18:19:32.692574 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.072017 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/util/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.382742 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/util/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.385838 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/pull/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.408792 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/pull/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.559294 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/util/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.606427 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/extract/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.637666 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931abmtb6_b7ff8f64-84f8-4079-9a59-dfd65a750348/pull/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.765181 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/util/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.980589 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/pull/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.994803 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/util/0.log" Nov 28 18:19:35 crc kubenswrapper[4884]: I1128 18:19:35.995618 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/pull/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.188834 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/extract/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.192140 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/pull/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.223966 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ft2p8l_08c41673-b3b0-4766-ae1a-52953e56772b/util/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.380441 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/util/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.539320 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/util/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.581869 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/pull/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.588460 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/pull/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.823469 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/pull/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.827274 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/extract/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.840715 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c921042jhs_1dc31ebd-f8ef-4a7a-8b40-6f27ba4de8bd/util/0.log" Nov 28 18:19:36 crc kubenswrapper[4884]: I1128 18:19:36.970772 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/util/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.177232 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/pull/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.223228 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/util/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.268527 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/pull/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.382161 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/pull/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.429981 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/util/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.494499 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f834vbdw_ac2d8399-8722-44d6-ac8f-57be77e84017/extract/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.569899 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/extract-utilities/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.808599 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/extract-content/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.861303 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/extract-content/0.log" Nov 28 18:19:37 crc kubenswrapper[4884]: I1128 18:19:37.876278 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/extract-utilities/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.063370 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/extract-utilities/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.292981 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/extract-content/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.452679 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/extract-utilities/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.704873 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-j24qw_b43cc6bf-3711-4f18-aa8e-881c9e48df55/registry-server/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.766171 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/extract-content/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.772549 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/extract-utilities/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.801829 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/extract-content/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.949171 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/extract-content/0.log" Nov 28 18:19:38 crc kubenswrapper[4884]: I1128 18:19:38.979061 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/extract-utilities/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.274511 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4mfmn_596c2102-9c67-495b-8080-beed3c62c0e1/registry-server/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.280563 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/extract-utilities/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.327161 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-54xz2_c624c933-d370-42a6-ae60-1bc50e004476/marketplace-operator/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.491360 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/extract-utilities/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.492596 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/extract-content/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.535426 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/extract-content/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.676355 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/extract-utilities/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.692846 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/extract-content/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.786446 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/extract-utilities/0.log" Nov 28 18:19:39 crc kubenswrapper[4884]: I1128 18:19:39.871129 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-2vm77_5936360b-de60-4fce-9974-8c2fbd0a113a/registry-server/0.log" Nov 28 18:19:40 crc kubenswrapper[4884]: I1128 18:19:40.007237 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/extract-utilities/0.log" Nov 28 18:19:40 crc kubenswrapper[4884]: I1128 18:19:40.015477 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/extract-content/0.log" Nov 28 18:19:40 crc kubenswrapper[4884]: I1128 18:19:40.019894 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/extract-content/0.log" Nov 28 18:19:40 crc kubenswrapper[4884]: I1128 18:19:40.173905 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/extract-utilities/0.log" Nov 28 18:19:40 crc kubenswrapper[4884]: I1128 18:19:40.201552 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/extract-content/0.log" Nov 28 18:19:41 crc kubenswrapper[4884]: I1128 18:19:41.419968 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c8fbg_a17cada8-f3df-43e7-b073-61ae4ebec3a2/registry-server/0.log" Nov 28 18:19:46 crc kubenswrapper[4884]: I1128 18:19:46.688123 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:19:46 crc kubenswrapper[4884]: E1128 18:19:46.689039 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:19:53 crc kubenswrapper[4884]: I1128 18:19:53.150821 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-9tckd_9f95eadf-8c98-4cbc-bc58-f0454043ee6b/prometheus-operator/0.log" Nov 28 18:19:53 crc kubenswrapper[4884]: I1128 18:19:53.322675 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-56f5485cb6-xgv4k_527a6443-b807-4583-b7f5-6307ba1cade7/prometheus-operator-admission-webhook/0.log" Nov 28 18:19:53 crc kubenswrapper[4884]: I1128 18:19:53.403694 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-56f5485cb6-zfn9g_6773cf61-55c4-4432-bc98-92b878f74b05/prometheus-operator-admission-webhook/0.log" Nov 28 18:19:53 crc kubenswrapper[4884]: I1128 18:19:53.538162 
4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-tw7tv_320957a8-3581-4e31-96e6-95f80a3cfcce/operator/0.log" Nov 28 18:19:53 crc kubenswrapper[4884]: I1128 18:19:53.592626 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-vqxbd_4d1cc7c0-4594-4443-80a8-237320e0138e/perses-operator/0.log" Nov 28 18:20:01 crc kubenswrapper[4884]: I1128 18:20:01.688436 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:20:01 crc kubenswrapper[4884]: E1128 18:20:01.689299 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:20:13 crc kubenswrapper[4884]: I1128 18:20:13.689537 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:20:13 crc kubenswrapper[4884]: E1128 18:20:13.690514 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:20:26 crc kubenswrapper[4884]: I1128 18:20:26.690959 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:20:26 crc kubenswrapper[4884]: E1128 18:20:26.693799 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:20:38 crc kubenswrapper[4884]: I1128 18:20:38.688829 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:20:38 crc kubenswrapper[4884]: E1128 18:20:38.690416 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:20:53 crc kubenswrapper[4884]: I1128 18:20:53.688730 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:20:53 crc kubenswrapper[4884]: E1128 18:20:53.689418 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:21:08 crc kubenswrapper[4884]: I1128 18:21:08.688607 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:21:08 crc kubenswrapper[4884]: E1128 18:21:08.689576 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:21:20 crc kubenswrapper[4884]: I1128 18:21:20.707799 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:21:20 crc kubenswrapper[4884]: E1128 18:21:20.710605 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.397357 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jqffn"] Nov 28 18:21:31 crc kubenswrapper[4884]: E1128 18:21:31.398921 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="extract-content" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.398956 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="extract-content" Nov 28 18:21:31 crc kubenswrapper[4884]: E1128 18:21:31.398977 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="extract-utilities" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.398983 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="extract-utilities" Nov 28 18:21:31 crc kubenswrapper[4884]: E1128 18:21:31.399035 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="registry-server" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.399043 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="registry-server" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.399428 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="54ec5032-6598-4238-827c-3003eb104021" containerName="registry-server" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.402388 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.417029 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jqffn"] Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.572695 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-utilities\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.572802 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqwsh\" (UniqueName: \"kubernetes.io/projected/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-kube-api-access-kqwsh\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.572877 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-catalog-content\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.675296 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-utilities\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.675603 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqwsh\" (UniqueName: \"kubernetes.io/projected/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-kube-api-access-kqwsh\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.675677 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-catalog-content\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.676062 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-utilities\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.676112 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-catalog-content\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.688463 4884 scope.go:117] 
"RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:21:31 crc kubenswrapper[4884]: E1128 18:21:31.688823 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.709014 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqwsh\" (UniqueName: \"kubernetes.io/projected/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-kube-api-access-kqwsh\") pod \"community-operators-jqffn\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:31 crc kubenswrapper[4884]: I1128 18:21:31.754653 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:32 crc kubenswrapper[4884]: I1128 18:21:32.345558 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jqffn"] Nov 28 18:21:32 crc kubenswrapper[4884]: I1128 18:21:32.996143 4884 generic.go:334] "Generic (PLEG): container finished" podID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerID="9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a" exitCode=0 Nov 28 18:21:32 crc kubenswrapper[4884]: I1128 18:21:32.996585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerDied","Data":"9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a"} Nov 28 18:21:32 crc kubenswrapper[4884]: I1128 18:21:32.996664 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerStarted","Data":"607028514c920a9ff738fa2a7e0bb4a71efd93b02bb8961da415317c6907094e"} Nov 28 18:21:34 crc kubenswrapper[4884]: I1128 18:21:34.007725 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerStarted","Data":"4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06"} Nov 28 18:21:35 crc kubenswrapper[4884]: I1128 18:21:35.020636 4884 generic.go:334] "Generic (PLEG): container finished" podID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerID="4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06" exitCode=0 Nov 28 18:21:35 crc kubenswrapper[4884]: I1128 18:21:35.020769 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerDied","Data":"4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06"} Nov 28 18:21:36 crc kubenswrapper[4884]: I1128 18:21:36.045291 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerStarted","Data":"a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90"} Nov 28 18:21:36 crc kubenswrapper[4884]: I1128 18:21:36.067267 4884 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jqffn" podStartSLOduration=2.413128938 podStartE2EDuration="5.067248166s" podCreationTimestamp="2025-11-28 18:21:31 +0000 UTC" firstStartedPulling="2025-11-28 18:21:33.000269553 +0000 UTC m=+10932.563053394" lastFinishedPulling="2025-11-28 18:21:35.654388801 +0000 UTC m=+10935.217172622" observedRunningTime="2025-11-28 18:21:36.060912792 +0000 UTC m=+10935.623696603" watchObservedRunningTime="2025-11-28 18:21:36.067248166 +0000 UTC m=+10935.630031967" Nov 28 18:21:41 crc kubenswrapper[4884]: I1128 18:21:41.755563 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:41 crc kubenswrapper[4884]: I1128 18:21:41.756350 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:41 crc kubenswrapper[4884]: I1128 18:21:41.805574 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:42 crc kubenswrapper[4884]: I1128 18:21:42.206974 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:42 crc kubenswrapper[4884]: I1128 18:21:42.266876 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jqffn"] Nov 28 18:21:43 crc kubenswrapper[4884]: I1128 18:21:43.687938 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:21:43 crc kubenswrapper[4884]: E1128 18:21:43.688630 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:21:44 crc kubenswrapper[4884]: I1128 18:21:44.153374 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jqffn" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="registry-server" containerID="cri-o://a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90" gracePeriod=2 Nov 28 18:21:44 crc kubenswrapper[4884]: E1128 18:21:44.438134 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f8cb431_8205_4e0c_ba37_e4a1b935bf8a.slice/crio-a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90.scope\": RecentStats: unable to find data in memory cache]" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.709638 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.808841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqwsh\" (UniqueName: \"kubernetes.io/projected/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-kube-api-access-kqwsh\") pod \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.808892 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-catalog-content\") pod \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.809319 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-utilities\") pod \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\" (UID: \"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a\") " Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.810135 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-utilities" (OuterVolumeSpecName: "utilities") pod "1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" (UID: "1f8cb431-8205-4e0c-ba37-e4a1b935bf8a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.816702 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-kube-api-access-kqwsh" (OuterVolumeSpecName: "kube-api-access-kqwsh") pod "1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" (UID: "1f8cb431-8205-4e0c-ba37-e4a1b935bf8a"). InnerVolumeSpecName "kube-api-access-kqwsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.864190 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" (UID: "1f8cb431-8205-4e0c-ba37-e4a1b935bf8a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.912575 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.912604 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqwsh\" (UniqueName: \"kubernetes.io/projected/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-kube-api-access-kqwsh\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:44.912616 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.168154 4884 generic.go:334] "Generic (PLEG): container finished" podID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerID="a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90" exitCode=0 Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.168203 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerDied","Data":"a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90"} Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.168235 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqffn" event={"ID":"1f8cb431-8205-4e0c-ba37-e4a1b935bf8a","Type":"ContainerDied","Data":"607028514c920a9ff738fa2a7e0bb4a71efd93b02bb8961da415317c6907094e"} Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.168249 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jqffn" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.168261 4884 scope.go:117] "RemoveContainer" containerID="a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.239489 4884 scope.go:117] "RemoveContainer" containerID="4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.243702 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jqffn"] Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.257949 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jqffn"] Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.285938 4884 scope.go:117] "RemoveContainer" containerID="9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.325347 4884 scope.go:117] "RemoveContainer" containerID="a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90" Nov 28 18:21:45 crc kubenswrapper[4884]: E1128 18:21:45.325810 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90\": container with ID starting with a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90 not found: ID does not exist" containerID="a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.325866 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90"} err="failed to get container status \"a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90\": rpc error: code = NotFound desc = could not find container \"a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90\": container with ID starting with a55c82530c349bf88af3f0f3def26d8fdd4fc9e43a75862ea45edc796c9c5d90 not found: ID does not exist" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.325902 4884 scope.go:117] "RemoveContainer" containerID="4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06" Nov 28 18:21:45 crc kubenswrapper[4884]: E1128 18:21:45.326541 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06\": container with ID starting with 4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06 not found: ID does not exist" containerID="4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.326574 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06"} err="failed to get container status \"4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06\": rpc error: code = NotFound desc = could not find container \"4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06\": container with ID starting with 4d8537dcd45647469ec6e30d69c4598e4d4b044dab46024d8800027810329d06 not found: ID does not exist" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.326601 4884 scope.go:117] "RemoveContainer" 
containerID="9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a" Nov 28 18:21:45 crc kubenswrapper[4884]: E1128 18:21:45.327129 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a\": container with ID starting with 9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a not found: ID does not exist" containerID="9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a" Nov 28 18:21:45 crc kubenswrapper[4884]: I1128 18:21:45.327177 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a"} err="failed to get container status \"9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a\": rpc error: code = NotFound desc = could not find container \"9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a\": container with ID starting with 9a8cd19f4c688da577944e4607bd3fa7ec26761f3745b67f98d3a8de1fe13a3a not found: ID does not exist" Nov 28 18:21:46 crc kubenswrapper[4884]: I1128 18:21:46.703641 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" path="/var/lib/kubelet/pods/1f8cb431-8205-4e0c-ba37-e4a1b935bf8a/volumes" Nov 28 18:21:54 crc kubenswrapper[4884]: I1128 18:21:54.691829 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:21:54 crc kubenswrapper[4884]: E1128 18:21:54.694351 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:21:56 crc kubenswrapper[4884]: I1128 18:21:56.304265 4884 generic.go:334] "Generic (PLEG): container finished" podID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerID="ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18" exitCode=0 Nov 28 18:21:56 crc kubenswrapper[4884]: I1128 18:21:56.304343 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" event={"ID":"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3","Type":"ContainerDied","Data":"ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18"} Nov 28 18:21:56 crc kubenswrapper[4884]: I1128 18:21:56.305734 4884 scope.go:117] "RemoveContainer" containerID="ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18" Nov 28 18:21:56 crc kubenswrapper[4884]: I1128 18:21:56.639395 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cdhk7_must-gather-b6mw5_0d7e07e4-8800-4d69-a6ee-b1be14c22cd3/gather/0.log" Nov 28 18:22:05 crc kubenswrapper[4884]: I1128 18:22:05.669914 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-cdhk7/must-gather-b6mw5"] Nov 28 18:22:05 crc kubenswrapper[4884]: I1128 18:22:05.670949 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="copy" containerID="cri-o://9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c" 
gracePeriod=2 Nov 28 18:22:05 crc kubenswrapper[4884]: I1128 18:22:05.685070 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-cdhk7/must-gather-b6mw5"] Nov 28 18:22:05 crc kubenswrapper[4884]: I1128 18:22:05.690360 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:22:05 crc kubenswrapper[4884]: E1128 18:22:05.690742 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.168184 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cdhk7_must-gather-b6mw5_0d7e07e4-8800-4d69-a6ee-b1be14c22cd3/copy/0.log" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.169269 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.212022 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-must-gather-output\") pod \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.212133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxvhb\" (UniqueName: \"kubernetes.io/projected/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-kube-api-access-pxvhb\") pod \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\" (UID: \"0d7e07e4-8800-4d69-a6ee-b1be14c22cd3\") " Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.218781 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-kube-api-access-pxvhb" (OuterVolumeSpecName: "kube-api-access-pxvhb") pod "0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" (UID: "0d7e07e4-8800-4d69-a6ee-b1be14c22cd3"). InnerVolumeSpecName "kube-api-access-pxvhb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.314938 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxvhb\" (UniqueName: \"kubernetes.io/projected/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-kube-api-access-pxvhb\") on node \"crc\" DevicePath \"\"" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.410057 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" (UID: "0d7e07e4-8800-4d69-a6ee-b1be14c22cd3"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.416899 4884 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.443240 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-cdhk7_must-gather-b6mw5_0d7e07e4-8800-4d69-a6ee-b1be14c22cd3/copy/0.log" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.443764 4884 generic.go:334] "Generic (PLEG): container finished" podID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerID="9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c" exitCode=143 Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.443829 4884 scope.go:117] "RemoveContainer" containerID="9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.443912 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-cdhk7/must-gather-b6mw5" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.479313 4884 scope.go:117] "RemoveContainer" containerID="ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.584271 4884 scope.go:117] "RemoveContainer" containerID="9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c" Nov 28 18:22:06 crc kubenswrapper[4884]: E1128 18:22:06.584917 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c\": container with ID starting with 9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c not found: ID does not exist" containerID="9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.584971 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c"} err="failed to get container status \"9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c\": rpc error: code = NotFound desc = could not find container \"9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c\": container with ID starting with 9a17eee19a8548fd661ce3575edef6c8ddca2c91ab10aba871b36a2b55a6f97c not found: ID does not exist" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.584997 4884 scope.go:117] "RemoveContainer" containerID="ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18" Nov 28 18:22:06 crc kubenswrapper[4884]: E1128 18:22:06.585492 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18\": container with ID starting with ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18 not found: ID does not exist" containerID="ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.585567 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18"} err="failed to get container status 
\"ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18\": rpc error: code = NotFound desc = could not find container \"ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18\": container with ID starting with ff35e491f442a150b17f5e156f22b3f2c5f87c71430f4aa8a043bf0a7ab1dd18 not found: ID does not exist" Nov 28 18:22:06 crc kubenswrapper[4884]: I1128 18:22:06.702033 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" path="/var/lib/kubelet/pods/0d7e07e4-8800-4d69-a6ee-b1be14c22cd3/volumes" Nov 28 18:22:17 crc kubenswrapper[4884]: I1128 18:22:17.688560 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:22:17 crc kubenswrapper[4884]: E1128 18:22:17.689460 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-pwcbp_openshift-machine-config-operator(120c26c6-4231-418f-a5af-738dc44915f8)\"" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" Nov 28 18:22:32 crc kubenswrapper[4884]: I1128 18:22:32.689148 4884 scope.go:117] "RemoveContainer" containerID="2e0a688692c64d1109374022959684c3ad4d0c81dfae8b447a4bd6519c6c46d3" Nov 28 18:22:33 crc kubenswrapper[4884]: I1128 18:22:33.815142 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" event={"ID":"120c26c6-4231-418f-a5af-738dc44915f8","Type":"ContainerStarted","Data":"85e735243e36955f9b9a1fdcb5742c65a7fc717a9ffd5f1011e61552dcc90fef"} Nov 28 18:24:51 crc kubenswrapper[4884]: I1128 18:24:51.242761 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:24:51 crc kubenswrapper[4884]: I1128 18:24:51.245127 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:25:21 crc kubenswrapper[4884]: I1128 18:25:21.243617 4884 patch_prober.go:28] interesting pod/machine-config-daemon-pwcbp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 18:25:21 crc kubenswrapper[4884]: I1128 18:25:21.244283 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-pwcbp" podUID="120c26c6-4231-418f-a5af-738dc44915f8" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.774235 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n4hhs"] Nov 28 18:25:36 crc kubenswrapper[4884]: E1128 18:25:36.775988 4884 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="extract-content" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776024 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="extract-content" Nov 28 18:25:36 crc kubenswrapper[4884]: E1128 18:25:36.776072 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="copy" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776122 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="copy" Nov 28 18:25:36 crc kubenswrapper[4884]: E1128 18:25:36.776158 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="gather" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776176 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="gather" Nov 28 18:25:36 crc kubenswrapper[4884]: E1128 18:25:36.776225 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="registry-server" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776241 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="registry-server" Nov 28 18:25:36 crc kubenswrapper[4884]: E1128 18:25:36.776292 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="extract-utilities" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776309 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="extract-utilities" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776868 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f8cb431-8205-4e0c-ba37-e4a1b935bf8a" containerName="registry-server" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776917 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="copy" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.776990 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d7e07e4-8800-4d69-a6ee-b1be14c22cd3" containerName="gather" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.788049 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.789340 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n4hhs"] Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.927715 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-catalog-content\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.927953 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzmz4\" (UniqueName: \"kubernetes.io/projected/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-kube-api-access-dzmz4\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:36 crc kubenswrapper[4884]: I1128 18:25:36.928184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-utilities\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.029807 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzmz4\" (UniqueName: \"kubernetes.io/projected/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-kube-api-access-dzmz4\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.030262 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-utilities\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.030401 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-catalog-content\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.030860 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-catalog-content\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.030854 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-utilities\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.049504 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dzmz4\" (UniqueName: \"kubernetes.io/projected/d517e1a9-822b-4d8a-9307-5f7b7fbafd2a-kube-api-access-dzmz4\") pod \"certified-operators-n4hhs\" (UID: \"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a\") " pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.133135 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n4hhs" Nov 28 18:25:37 crc kubenswrapper[4884]: I1128 18:25:37.868683 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n4hhs"] Nov 28 18:25:38 crc kubenswrapper[4884]: I1128 18:25:38.096025 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4hhs" event={"ID":"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a","Type":"ContainerStarted","Data":"5e882fbc2ab71866ca2d9ed98ff2938878796da958c9e3c9b7a59566c1d7e75a"} Nov 28 18:25:38 crc kubenswrapper[4884]: I1128 18:25:38.096064 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4hhs" event={"ID":"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a","Type":"ContainerStarted","Data":"a8e97058e5385fed571e50c6a82ed0caa314d453c9d390d6bf2dbd71f1558b10"} Nov 28 18:25:39 crc kubenswrapper[4884]: I1128 18:25:39.110986 4884 generic.go:334] "Generic (PLEG): container finished" podID="d517e1a9-822b-4d8a-9307-5f7b7fbafd2a" containerID="5e882fbc2ab71866ca2d9ed98ff2938878796da958c9e3c9b7a59566c1d7e75a" exitCode=0 Nov 28 18:25:39 crc kubenswrapper[4884]: I1128 18:25:39.111080 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4hhs" event={"ID":"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a","Type":"ContainerDied","Data":"5e882fbc2ab71866ca2d9ed98ff2938878796da958c9e3c9b7a59566c1d7e75a"} Nov 28 18:25:39 crc kubenswrapper[4884]: I1128 18:25:39.114070 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 18:25:41 crc kubenswrapper[4884]: I1128 18:25:41.137681 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4hhs" event={"ID":"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a","Type":"ContainerStarted","Data":"136256cfe0054ec7d0c0a82f1f1e8203e19d575c029b471c4c0c1240720b85a4"} Nov 28 18:25:42 crc kubenswrapper[4884]: I1128 18:25:42.154870 4884 generic.go:334] "Generic (PLEG): container finished" podID="d517e1a9-822b-4d8a-9307-5f7b7fbafd2a" containerID="136256cfe0054ec7d0c0a82f1f1e8203e19d575c029b471c4c0c1240720b85a4" exitCode=0 Nov 28 18:25:42 crc kubenswrapper[4884]: I1128 18:25:42.154989 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4hhs" event={"ID":"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a","Type":"ContainerDied","Data":"136256cfe0054ec7d0c0a82f1f1e8203e19d575c029b471c4c0c1240720b85a4"} Nov 28 18:25:43 crc kubenswrapper[4884]: I1128 18:25:43.170677 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n4hhs" event={"ID":"d517e1a9-822b-4d8a-9307-5f7b7fbafd2a","Type":"ContainerStarted","Data":"d767f72fdb00c17b0123c456d9263d782ac047caf862f1a9db6dadfa41c6cb2a"} Nov 28 18:25:43 crc kubenswrapper[4884]: I1128 18:25:43.193763 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n4hhs" podStartSLOduration=3.681894019 podStartE2EDuration="7.19374014s" 
podCreationTimestamp="2025-11-28 18:25:36 +0000 UTC" firstStartedPulling="2025-11-28 18:25:39.113766855 +0000 UTC m=+11178.676550666" lastFinishedPulling="2025-11-28 18:25:42.625612946 +0000 UTC m=+11182.188396787" observedRunningTime="2025-11-28 18:25:43.188576205 +0000 UTC m=+11182.751360056" watchObservedRunningTime="2025-11-28 18:25:43.19374014 +0000 UTC m=+11182.756523951"